Module pachyderm_sdk.api.pps

# Generated by the protocol buffer compiler.  DO NOT EDIT!
# sources: api/pps/pps.proto
# plugin: python-betterproto
# This file has been @generated
import warnings
from dataclasses import dataclass
from datetime import (
    datetime,
    timedelta,
)
from typing import (
    TYPE_CHECKING,
    AsyncIterable,
    AsyncIterator,
    Dict,
    Iterable,
    Iterator,
    List,
    Optional,
    Union,
)

import betterproto
import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf
import grpc

from .. import (
    pfs as _pfs__,
    taskapi as _taskapi__,
)


if TYPE_CHECKING:
    import grpc


class JobState(betterproto.Enum):
    JOB_STATE_UNKNOWN = 0
    JOB_CREATED = 1
    JOB_STARTING = 2
    JOB_RUNNING = 3
    JOB_FAILURE = 4
    JOB_SUCCESS = 5
    JOB_KILLED = 6
    JOB_EGRESSING = 7
    JOB_FINISHING = 8
    JOB_UNRUNNABLE = 9


class DatumState(betterproto.Enum):
    UNKNOWN = 0
    FAILED = 1
    SUCCESS = 2
    SKIPPED = 3
    STARTING = 4
    RECOVERED = 5


class WorkerState(betterproto.Enum):
    WORKER_STATE_UNKNOWN = 0
    POD_RUNNING = 1
    POD_SUCCESS = 2
    POD_FAILED = 3


class PipelineState(betterproto.Enum):
    PIPELINE_STATE_UNKNOWN = 0
    PIPELINE_STARTING = 1
    """
    There is a PipelineInfo + spec commit, but no RC This happens when a
    pipeline has been created but not yet picked up by a PPS server.
    """

    PIPELINE_RUNNING = 2
    """
    A pipeline has a spec commit and a service + RC This is the normal state of
    a pipeline.
    """

    PIPELINE_RESTARTING = 3
    """
    Equivalent to STARTING (there is a PipelineInfo + commit, but no RC) After
    some error caused runPipeline to exit, but before the pipeline is re-run.
    This is when the exponential backoff is in effect.
    """

    PIPELINE_FAILURE = 4
    """
    The pipeline has encountered unrecoverable errors and is no longer being
    retried. It won't leave this state until the pipeline is updated.
    """

    PIPELINE_PAUSED = 5
    """
    The pipeline has been explicitly paused by the user (the pipeline spec's
    Stopped field should be true if the pipeline is in this state)
    """

    PIPELINE_STANDBY = 6
    """
    The pipeline is fully functional, but there are no commits to process.
    """

    PIPELINE_CRASHING = 7
    """
    The pipeline's workers are crashing, or failing to come up, this may
    resolve itself, the pipeline may make progress while in this state if the
    problem is only being experienced by some workers.
    """


class TolerationOperator(betterproto.Enum):
    """TolerationOperator relates a Toleration's key to its value."""

    EMPTY = 0
    EXISTS = 1
    EQUAL = 2


class TaintEffect(betterproto.Enum):
    """TaintEffect is an effect that can be matched by a toleration."""

    ALL_EFFECTS = 0
    NO_SCHEDULE = 1
    PREFER_NO_SCHEDULE = 2
    NO_EXECUTE = 3


class PipelineInfoPipelineType(betterproto.Enum):
    """
    The pipeline type is stored here so that we can internally know the type of
    the pipeline without loading the spec from PFS.
    """

    PIPELINT_TYPE_UNKNOWN = 0
    PIPELINE_TYPE_TRANSFORM = 1
    PIPELINE_TYPE_SPOUT = 2
    PIPELINE_TYPE_SERVICE = 3


@dataclass(eq=False, repr=False)
class SecretMount(betterproto.Message):
    name: str = betterproto.string_field(1)
    """Name must be the name of the secret in kubernetes."""

    key: str = betterproto.string_field(2)
    """
    Key of the secret to load into env_var, this field only has meaning if
    EnvVar != "".
    """

    mount_path: str = betterproto.string_field(3)
    env_var: str = betterproto.string_field(4)


@dataclass(eq=False, repr=False)
class Transform(betterproto.Message):
    image: str = betterproto.string_field(1)
    cmd: List[str] = betterproto.string_field(2)
    err_cmd: List[str] = betterproto.string_field(3)
    env: Dict[str, str] = betterproto.map_field(
        4, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    )
    secrets: List["SecretMount"] = betterproto.message_field(5)
    image_pull_secrets: List[str] = betterproto.string_field(6)
    stdin: List[str] = betterproto.string_field(7)
    err_stdin: List[str] = betterproto.string_field(8)
    accept_return_code: List[int] = betterproto.int64_field(9)
    debug: bool = betterproto.bool_field(10)
    user: str = betterproto.string_field(11)
    working_dir: str = betterproto.string_field(12)
    dockerfile: str = betterproto.string_field(13)
    memory_volume: bool = betterproto.bool_field(14)
    datum_batching: bool = betterproto.bool_field(15)
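
# Illustrative sketch (not part of the generated API): a minimal Transform for
# a Python pipeline step. The image, command, and env values are assumptions.
_example_transform = Transform(
    image="python:3.11-slim",
    cmd=["python3", "/app/main.py"],
    env={"LOG_LEVEL": "info"},
)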


@dataclass(eq=False, repr=False)
class TfJob(betterproto.Message):
    tf_job: str = betterproto.string_field(1)
    """
    tf_job  is a serialized Kubeflow TFJob spec. Pachyderm sends this directly
    to a kubernetes cluster on which kubeflow has been installed, instead of
    creating a pipeline ReplicationController as it normally would.
    """


@dataclass(eq=False, repr=False)
class Egress(betterproto.Message):
    url: str = betterproto.string_field(1)
    object_storage: "_pfs__.ObjectStorageEgress" = betterproto.message_field(
        2, group="target"
    )
    sql_database: "_pfs__.SqlDatabaseEgress" = betterproto.message_field(
        3, group="target"
    )
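
# Illustrative sketch: 'object_storage' and 'sql_database' form a oneof group
# named "target", so setting one clears the other, and betterproto.which_one_of
# reports which member is set. Assumes _pfs__.ObjectStorageEgress carries its
# destination in a 'url' field; the URL value is illustrative.
_example_egress = Egress(
    object_storage=_pfs__.ObjectStorageEgress(url="s3://bucket/prefix")
)
_target_field, _target_value = betterproto.which_one_of(_example_egress, "target")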


@dataclass(eq=False, repr=False)
class Determined(betterproto.Message):
    workspaces: List[str] = betterproto.string_field(1)


@dataclass(eq=False, repr=False)
class Job(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    id: str = betterproto.string_field(2)


@dataclass(eq=False, repr=False)
class Metadata(betterproto.Message):
    annotations: Dict[str, str] = betterproto.map_field(
        1, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    )
    labels: Dict[str, str] = betterproto.map_field(
        2, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    )


@dataclass(eq=False, repr=False)
class Service(betterproto.Message):
    internal_port: int = betterproto.int32_field(1)
    external_port: int = betterproto.int32_field(2)
    ip: str = betterproto.string_field(3)
    type: str = betterproto.string_field(4)


@dataclass(eq=False, repr=False)
class Spout(betterproto.Message):
    service: "Service" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class PfsInput(betterproto.Message):
    project: str = betterproto.string_field(14)
    name: str = betterproto.string_field(1)
    repo: str = betterproto.string_field(2)
    repo_type: str = betterproto.string_field(13)
    branch: str = betterproto.string_field(3)
    commit: str = betterproto.string_field(4)
    glob: str = betterproto.string_field(5)
    join_on: str = betterproto.string_field(6)
    outer_join: bool = betterproto.bool_field(7)
    group_by: str = betterproto.string_field(8)
    lazy: bool = betterproto.bool_field(9)
    empty_files: bool = betterproto.bool_field(10)
    """
    EmptyFiles, if true, will cause files from this PFS input to be presented
    as empty files. This is useful in shuffle pipelines where you want to read
    the names of files and reorganize them using symlinks.
    """

    s3: bool = betterproto.bool_field(11)
    """
    S3, if true, will cause the worker to NOT download or link files from this
    input into the /pfs directory. Instead, an instance of our S3 gateway
    service will run on each of the sidecars, and data can be retrieved from
    this input by querying http://<pipeline>-s3.<namespace>/<job
    id>.<input>/my/file
    """

    trigger: "_pfs__.Trigger" = betterproto.message_field(12)
    """
    Trigger defines when this input is processed by the pipeline, if it's nil
    the input is processed anytime something is committed to the input branch.
    """


@dataclass(eq=False, repr=False)
class CronInput(betterproto.Message):
    name: str = betterproto.string_field(1)
    project: str = betterproto.string_field(7)
    repo: str = betterproto.string_field(2)
    commit: str = betterproto.string_field(3)
    spec: str = betterproto.string_field(4)
    overwrite: bool = betterproto.bool_field(5)
    """
    Overwrite, if true, will expose a single datum that gets overwritten each
    tick. If false, it will create a new datum for each tick.
    """

    start: datetime = betterproto.message_field(6)
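
# Illustrative sketch: a cron input that ticks every hour and overwrites the
# previous tick's datum rather than accumulating one datum per tick. The name
# and spec values are assumptions.
_example_cron = CronInput(name="tick", spec="@every 1h", overwrite=True)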


@dataclass(eq=False, repr=False)
class Input(betterproto.Message):
    pfs: "PfsInput" = betterproto.message_field(1)
    join: List["Input"] = betterproto.message_field(2)
    group: List["Input"] = betterproto.message_field(3)
    cross: List["Input"] = betterproto.message_field(4)
    union: List["Input"] = betterproto.message_field(5)
    cron: "CronInput" = betterproto.message_field(6)


@dataclass(eq=False, repr=False)
class JobInput(betterproto.Message):
    name: str = betterproto.string_field(1)
    commit: "_pfs__.Commit" = betterproto.message_field(2)
    glob: str = betterproto.string_field(3)
    lazy: bool = betterproto.bool_field(4)


@dataclass(eq=False, repr=False)
class ParallelismSpec(betterproto.Message):
    constant: int = betterproto.uint64_field(1)
    """
    Starts the pipeline/job with a 'constant' workers, unless 'constant' is
    zero. If 'constant' is zero (which is the zero value of ParallelismSpec),
    then Pachyderm will choose the number of workers that is started,
    (currently it chooses the number of workers in the cluster)
    """


@dataclass(eq=False, repr=False)
class InputFile(betterproto.Message):
    path: str = betterproto.string_field(1)
    """This file's absolute path within its pfs repo."""

    hash: bytes = betterproto.bytes_field(2)
    """This file's hash"""


@dataclass(eq=False, repr=False)
class Datum(betterproto.Message):
    job: "Job" = betterproto.message_field(1)
    """ID is the hash computed from all the files"""

    id: str = betterproto.string_field(2)


@dataclass(eq=False, repr=False)
class DatumInfo(betterproto.Message):
    datum: "Datum" = betterproto.message_field(1)
    state: "DatumState" = betterproto.enum_field(2)
    stats: "ProcessStats" = betterproto.message_field(3)
    pfs_state: "_pfs__.File" = betterproto.message_field(4)
    data: List["_pfs__.FileInfo"] = betterproto.message_field(5)
    image_id: str = betterproto.string_field(6)


@dataclass(eq=False, repr=False)
class Aggregate(betterproto.Message):
    count: int = betterproto.int64_field(1)
    mean: float = betterproto.double_field(2)
    stddev: float = betterproto.double_field(3)
    fifth_percentile: float = betterproto.double_field(4)
    ninety_fifth_percentile: float = betterproto.double_field(5)


@dataclass(eq=False, repr=False)
class ProcessStats(betterproto.Message):
    download_time: timedelta = betterproto.message_field(1)
    process_time: timedelta = betterproto.message_field(2)
    upload_time: timedelta = betterproto.message_field(3)
    download_bytes: int = betterproto.int64_field(4)
    upload_bytes: int = betterproto.int64_field(5)


@dataclass(eq=False, repr=False)
class AggregateProcessStats(betterproto.Message):
    download_time: "Aggregate" = betterproto.message_field(1)
    process_time: "Aggregate" = betterproto.message_field(2)
    upload_time: "Aggregate" = betterproto.message_field(3)
    download_bytes: "Aggregate" = betterproto.message_field(4)
    upload_bytes: "Aggregate" = betterproto.message_field(5)


@dataclass(eq=False, repr=False)
class WorkerStatus(betterproto.Message):
    worker_id: str = betterproto.string_field(1)
    job_id: str = betterproto.string_field(2)
    datum_status: "DatumStatus" = betterproto.message_field(3)


@dataclass(eq=False, repr=False)
class DatumStatus(betterproto.Message):
    started: datetime = betterproto.message_field(1)
    """Started is the time processing on the current datum began."""

    data: List["InputFile"] = betterproto.message_field(2)


@dataclass(eq=False, repr=False)
class ResourceSpec(betterproto.Message):
    """
    ResourceSpec describes the amount of resources that pipeline pods should
    request from kubernetes, for scheduling.
    """

    cpu: float = betterproto.float_field(1)
    """
    The number of CPUs each worker needs (partial values are allowed, and
    encouraged)
    """

    memory: str = betterproto.string_field(2)
    """
    The amount of memory each worker needs (in bytes, with allowed SI suffixes
    (M, K, G, Mi, Ki, Gi, etc).
    """

    gpu: "GpuSpec" = betterproto.message_field(3)
    """The spec for GPU resources."""

    disk: str = betterproto.string_field(4)
    """
    The amount of ephemeral storage each worker needs (in bytes, with allowed
    SI suffixes (M, K, G, Mi, Ki, Gi, etc).
    """


@dataclass(eq=False, repr=False)
class GpuSpec(betterproto.Message):
    type: str = betterproto.string_field(1)
    """The type of GPU (nvidia.com/gpu or amd.com/gpu for example)."""

    number: int = betterproto.int64_field(2)
    """The number of GPUs to request."""


@dataclass(eq=False, repr=False)
class JobSetInfo(betterproto.Message):
    job_set: "JobSet" = betterproto.message_field(1)
    jobs: List["JobInfo"] = betterproto.message_field(2)


@dataclass(eq=False, repr=False)
class JobInfo(betterproto.Message):
    """
    JobInfo is the data stored in the database regarding a given job.  The
    'details' field contains more information about the job which is expensive
    to fetch, requiring querying workers or loading the pipeline spec from
    object storage.
    """

    job: "Job" = betterproto.message_field(1)
    pipeline_version: int = betterproto.uint64_field(2)
    output_commit: "_pfs__.Commit" = betterproto.message_field(3)
    restart: int = betterproto.uint64_field(4)
    """Job restart count (e.g. due to datum failure)"""

    data_processed: int = betterproto.int64_field(5)
    """Counts of how many times we processed or skipped a datum"""

    data_skipped: int = betterproto.int64_field(6)
    data_total: int = betterproto.int64_field(7)
    data_failed: int = betterproto.int64_field(8)
    data_recovered: int = betterproto.int64_field(9)
    stats: "ProcessStats" = betterproto.message_field(10)
    """Download/process/upload time and download/upload bytes"""

    state: "JobState" = betterproto.enum_field(11)
    reason: str = betterproto.string_field(12)
    created: datetime = betterproto.message_field(13)
    started: datetime = betterproto.message_field(14)
    finished: datetime = betterproto.message_field(15)
    details: "JobInfoDetails" = betterproto.message_field(16)
    auth_token: str = betterproto.string_field(17)


@dataclass(eq=False, repr=False)
class JobInfoDetails(betterproto.Message):
    transform: "Transform" = betterproto.message_field(1)
    parallelism_spec: "ParallelismSpec" = betterproto.message_field(2)
    egress: "Egress" = betterproto.message_field(3)
    service: "Service" = betterproto.message_field(4)
    spout: "Spout" = betterproto.message_field(5)
    worker_status: List["WorkerStatus"] = betterproto.message_field(6)
    resource_requests: "ResourceSpec" = betterproto.message_field(7)
    resource_limits: "ResourceSpec" = betterproto.message_field(8)
    sidecar_resource_limits: "ResourceSpec" = betterproto.message_field(9)
    input: "Input" = betterproto.message_field(10)
    salt: str = betterproto.string_field(11)
    datum_set_spec: "DatumSetSpec" = betterproto.message_field(12)
    datum_timeout: timedelta = betterproto.message_field(13)
    job_timeout: timedelta = betterproto.message_field(14)
    datum_tries: int = betterproto.int64_field(15)
    scheduling_spec: "SchedulingSpec" = betterproto.message_field(16)
    pod_spec: str = betterproto.string_field(17)
    pod_patch: str = betterproto.string_field(18)
    sidecar_resource_requests: "ResourceSpec" = betterproto.message_field(19)


@dataclass(eq=False, repr=False)
class Worker(betterproto.Message):
    name: str = betterproto.string_field(1)
    state: "WorkerState" = betterproto.enum_field(2)


@dataclass(eq=False, repr=False)
class Pipeline(betterproto.Message):
    project: "_pfs__.Project" = betterproto.message_field(2)
    name: str = betterproto.string_field(1)


@dataclass(eq=False, repr=False)
class Toleration(betterproto.Message):
    """Toleration is a Kubernetes toleration."""

    key: str = betterproto.string_field(1)
    """
    key is the taint key that the toleration applies to.  Empty means match all
    taint keys.
    """

    operator: "TolerationOperator" = betterproto.enum_field(2)
    """operator represents a key's relationship to the value."""

    value: str = betterproto.string_field(3)
    """value is the taint value the toleration matches to."""

    effect: "TaintEffect" = betterproto.enum_field(4)
    """
    effect indicates the taint effect to match.  Empty means match all taint
    effects.
    """

    toleration_seconds: Optional[int] = betterproto.message_field(
        5, wraps=betterproto.TYPE_INT64
    )
    """
    toleration_seconds represents the period of time the toleration (which must
    be of effect NoExecute, otherwise this field is ignored) tolerates the
    taint.  If not set, tolerate the taint forever.
    """


@dataclass(eq=False, repr=False)
class PipelineInfo(betterproto.Message):
    """
    PipelineInfo is proto for each pipeline that Pachd stores in the database.
    It tracks the state of the pipeline, and points to its metadata in PFS
    (and, by pointing to a PFS commit, de facto tracks the pipeline's version).
    Any information about the pipeline _not_ stored in the database is in the
    Details object, which requires fetching the spec from PFS or other
    potentially expensive operations.
    """

    pipeline: "Pipeline" = betterproto.message_field(1)
    version: int = betterproto.uint64_field(2)
    spec_commit: "_pfs__.Commit" = betterproto.message_field(3)
    stopped: bool = betterproto.bool_field(4)
    state: "PipelineState" = betterproto.enum_field(5)
    """state indicates the current state of the pipeline"""

    reason: str = betterproto.string_field(6)
    """reason includes any error messages associated with a failed pipeline"""

    last_job_state: "JobState" = betterproto.enum_field(8)
    """last_job_state indicates the state of the most recently created job"""

    parallelism: int = betterproto.uint64_field(9)
    """
    parallelism tracks the literal number of workers that this pipeline should
    run.
    """

    type: "PipelineInfoPipelineType" = betterproto.enum_field(10)
    auth_token: str = betterproto.string_field(11)
    details: "PipelineInfoDetails" = betterproto.message_field(12)
    user_spec_json: str = betterproto.string_field(13)
    effective_spec_json: str = betterproto.string_field(14)


@dataclass(eq=False, repr=False)
class PipelineInfoDetails(betterproto.Message):
    transform: "Transform" = betterproto.message_field(1)
    tf_job: "TfJob" = betterproto.message_field(2)
    """
    tf_job encodes a Kubeflow TFJob spec. Pachyderm uses this to create TFJobs
    when running in a kubernetes cluster on which kubeflow has been installed.
    Exactly one of 'tf_job' and 'transform' should be set
    """

    parallelism_spec: "ParallelismSpec" = betterproto.message_field(3)
    egress: "Egress" = betterproto.message_field(4)
    created_at: datetime = betterproto.message_field(5)
    recent_error: str = betterproto.string_field(6)
    workers_requested: int = betterproto.int64_field(7)
    workers_available: int = betterproto.int64_field(8)
    output_branch: str = betterproto.string_field(9)
    resource_requests: "ResourceSpec" = betterproto.message_field(10)
    resource_limits: "ResourceSpec" = betterproto.message_field(11)
    sidecar_resource_limits: "ResourceSpec" = betterproto.message_field(12)
    input: "Input" = betterproto.message_field(13)
    description: str = betterproto.string_field(14)
    salt: str = betterproto.string_field(16)
    reason: str = betterproto.string_field(17)
    service: "Service" = betterproto.message_field(19)
    spout: "Spout" = betterproto.message_field(20)
    datum_set_spec: "DatumSetSpec" = betterproto.message_field(21)
    datum_timeout: timedelta = betterproto.message_field(22)
    job_timeout: timedelta = betterproto.message_field(23)
    datum_tries: int = betterproto.int64_field(24)
    scheduling_spec: "SchedulingSpec" = betterproto.message_field(25)
    pod_spec: str = betterproto.string_field(26)
    pod_patch: str = betterproto.string_field(27)
    s3_out: bool = betterproto.bool_field(28)
    metadata: "Metadata" = betterproto.message_field(29)
    reprocess_spec: str = betterproto.string_field(30)
    unclaimed_tasks: int = betterproto.int64_field(31)
    worker_rc: str = betterproto.string_field(32)
    autoscaling: bool = betterproto.bool_field(33)
    tolerations: List["Toleration"] = betterproto.message_field(34)
    sidecar_resource_requests: "ResourceSpec" = betterproto.message_field(35)
    determined: "Determined" = betterproto.message_field(36)
    maximum_expected_uptime: timedelta = betterproto.message_field(37)
    workers_started_at: datetime = betterproto.message_field(38)


@dataclass(eq=False, repr=False)
class PipelineInfos(betterproto.Message):
    pipeline_info: List["PipelineInfo"] = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class JobSet(betterproto.Message):
    id: str = betterproto.string_field(1)


@dataclass(eq=False, repr=False)
class InspectJobSetRequest(betterproto.Message):
    job_set: "JobSet" = betterproto.message_field(1)
    wait: bool = betterproto.bool_field(2)
    details: bool = betterproto.bool_field(3)


@dataclass(eq=False, repr=False)
class ListJobSetRequest(betterproto.Message):
    details: bool = betterproto.bool_field(1)
    projects: List["_pfs__.Project"] = betterproto.message_field(2)
    """A list of projects to filter jobs on, nil means don't filter."""

    pagination_marker: datetime = betterproto.message_field(3)
    """
    we return job sets created before or after this time based on the reverse
    flag
    """

    number: int = betterproto.int64_field(4)
    """number of results to return"""

    reverse: bool = betterproto.bool_field(5)
    """if true, return results in reverse order"""

    jq_filter: str = betterproto.string_field(6)
    """A jq program string for additional result filtering"""


@dataclass(eq=False, repr=False)
class InspectJobRequest(betterproto.Message):
    job: "Job" = betterproto.message_field(1)
    """Callers should set either Job or OutputCommit, not both."""

    wait: bool = betterproto.bool_field(2)
    details: bool = betterproto.bool_field(3)


@dataclass(eq=False, repr=False)
class ListJobRequest(betterproto.Message):
    projects: List["_pfs__.Project"] = betterproto.message_field(7)
    """A list of projects to filter jobs on, nil means don't filter."""

    pipeline: "Pipeline" = betterproto.message_field(1)
    input_commit: List["_pfs__.Commit"] = betterproto.message_field(2)
    history: int = betterproto.int64_field(4)
    """
    History indicates return jobs from historical versions of pipelines
    semantics are: 0: Return jobs from the current version of the pipeline or
    pipelines. 1: Return the above and jobs from the next most recent version
    2: etc.-1: Return jobs from all historical versions.
    """

    details: bool = betterproto.bool_field(5)
    """
    Details indicates whether the result should include all pipeline details in
    each JobInfo, or limited information including name and status, but
    excluding information in the pipeline spec. Leaving this "false" can make
    the call significantly faster in clusters with a large number of pipelines
    and jobs. Note that if 'input_commit' is set, this field is coerced to
    "true"
    """

    jq_filter: str = betterproto.string_field(6)
    """A jq program string for additional result filtering"""

    pagination_marker: datetime = betterproto.message_field(8)
    """timestamp that is pagination marker"""

    number: int = betterproto.int64_field(9)
    """number of results to return"""

    reverse: bool = betterproto.bool_field(10)
    """flag to indicated if results should be returned in reverse order"""


@dataclass(eq=False, repr=False)
class SubscribeJobRequest(betterproto.Message):
    """Streams open jobs until canceled"""

    pipeline: "Pipeline" = betterproto.message_field(1)
    details: bool = betterproto.bool_field(2)


@dataclass(eq=False, repr=False)
class DeleteJobRequest(betterproto.Message):
    job: "Job" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class StopJobRequest(betterproto.Message):
    job: "Job" = betterproto.message_field(1)
    reason: str = betterproto.string_field(3)


@dataclass(eq=False, repr=False)
class UpdateJobStateRequest(betterproto.Message):
    job: "Job" = betterproto.message_field(1)
    state: "JobState" = betterproto.enum_field(2)
    reason: str = betterproto.string_field(3)
    restart: int = betterproto.uint64_field(5)
    data_processed: int = betterproto.int64_field(6)
    data_skipped: int = betterproto.int64_field(7)
    data_failed: int = betterproto.int64_field(8)
    data_recovered: int = betterproto.int64_field(9)
    data_total: int = betterproto.int64_field(10)
    stats: "ProcessStats" = betterproto.message_field(11)


@dataclass(eq=False, repr=False)
class GetLogsRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    """
    The pipeline from which we want to get logs (required if the job in 'job'
    was created as part of a pipeline. To get logs from a non-orphan job
    without the pipeline that created it, you need to use ElasticSearch).
    """

    job: "Job" = betterproto.message_field(2)
    """The job from which we want to get logs."""

    data_filters: List[str] = betterproto.string_field(3)
    """
    Names of input files from which we want processing logs. This may contain
    multiple files, to query pipelines that contain multiple inputs. Each
    filter may be an absolute path of a file within a pps repo, or it may be a
    hash for that file (to search for files at specific versions)
    """

    datum: "Datum" = betterproto.message_field(4)
    master: bool = betterproto.bool_field(5)
    """If true get logs from the master process"""

    follow: bool = betterproto.bool_field(6)
    """Continue to follow new logs as they become available."""

    tail: int = betterproto.int64_field(7)
    """
    If nonzero, the number of lines from the end of the logs to return.  Note:
    tail applies per container, so you will get tail * <number of pods> total
    lines back.
    """

    use_loki_backend: bool = betterproto.bool_field(8)
    """
    UseLokiBackend causes the logs request to go through the loki backend
    rather than through kubernetes. This behavior can also be achieved by
    setting the LOKI_LOGGING feature flag.
    """

    since: timedelta = betterproto.message_field(9)
    """
    Since specifies how far in the past to return logs from. It defaults to 24
    hours.
    """


@dataclass(eq=False, repr=False)
class LogMessage(betterproto.Message):
    """
    LogMessage is a log line from a PPS worker, annotated with metadata
    indicating when and why the line was logged.
    """

    project_name: str = betterproto.string_field(10)
    """
    The job and pipeline for which a PFS file is being processed (if the job is
    an orphan job, pipeline name and ID will be unset)
    """

    pipeline_name: str = betterproto.string_field(1)
    job_id: str = betterproto.string_field(2)
    worker_id: str = betterproto.string_field(3)
    datum_id: str = betterproto.string_field(4)
    master: bool = betterproto.bool_field(5)
    data: List["InputFile"] = betterproto.message_field(6)
    """The PFS files being processed (one per pipeline/job input)"""

    user: bool = betterproto.bool_field(7)
    """User is true if the log message comes from the user's code."""

    ts: datetime = betterproto.message_field(8)
    """The time at which the message was logged"""

    message: str = betterproto.string_field(9)
    """The message logged"""


@dataclass(eq=False, repr=False)
class RestartDatumRequest(betterproto.Message):
    job: "Job" = betterproto.message_field(1)
    data_filters: List[str] = betterproto.string_field(2)


@dataclass(eq=False, repr=False)
class InspectDatumRequest(betterproto.Message):
    datum: "Datum" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class ListDatumRequest(betterproto.Message):
    job: "Job" = betterproto.message_field(1)
    """
    Job and Input are two different ways to specify the datums you want. Only
    one can be set. Job is the job to list datums from.
    """

    input: "Input" = betterproto.message_field(2)
    """
    Input is the input to list datums from. The datums listed are the ones that
    would be run if a pipeline was created with the provided input.
    """

    filter: "ListDatumRequestFilter" = betterproto.message_field(3)
    pagination_marker: str = betterproto.string_field(4)
    """datum id to start from. we do not include this datum in the response"""

    number: int = betterproto.int64_field(5)
    """Number of datums to return"""

    reverse: bool = betterproto.bool_field(6)
    """If true, return datums in reverse order"""


@dataclass(eq=False, repr=False)
class ListDatumRequestFilter(betterproto.Message):
    """
    Filter restricts returned DatumInfo messages to those which match all of
    the filtered attributes.
    """

    state: List["DatumState"] = betterproto.enum_field(1)


@dataclass(eq=False, repr=False)
class CreateDatumRequest(betterproto.Message):
    """
    Emits a stream of datums as they are created from the given input. Client
    must cancel the stream when it no longer wants to receive datums.
    """

    input: "Input" = betterproto.message_field(1)
    """
    Input is the input to list datums from. The datums listed are the ones
    that would be run if a pipeline was created with the provided input. The
    input field is only required for the first request. The server ignores
    subsequent requests' input field.
    """

    number: int = betterproto.int64_field(2)
    """Number of datums to return in next response"""


@dataclass(eq=False, repr=False)
class DatumSetSpec(betterproto.Message):
    """
    DatumSetSpec specifies how a pipeline should split its datums into datum
    sets.
    """

    number: int = betterproto.int64_field(1)
    """
    number, if nonzero, specifies that each datum set should contain `number`
    datums. Datum sets may contain fewer if the total number of datums don't
    divide evenly.
    """

    size_bytes: int = betterproto.int64_field(2)
    """
    size_bytes, if nonzero, specifies a target size for each datum set. Datum
    sets may be larger or smaller than size_bytes, but will usually be pretty
    close to size_bytes in size.
    """

    per_worker: int = betterproto.int64_field(3)
    """
    per_worker, if nonzero, specifies how many datum sets should be created for
    each worker. It can't be set with number or size_bytes.
    """


@dataclass(eq=False, repr=False)
class SchedulingSpec(betterproto.Message):
    node_selector: Dict[str, str] = betterproto.map_field(
        1, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    )
    priority_class_name: str = betterproto.string_field(2)


@dataclass(eq=False, repr=False)
class RerunPipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    reprocess: bool = betterproto.bool_field(15)
    """Reprocess forces the pipeline to reprocess all datums."""


@dataclass(eq=False, repr=False)
class CreatePipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    tf_job: "TfJob" = betterproto.message_field(2)
    """
    tf_job encodes a Kubeflow TFJob spec. Pachyderm uses this to create TFJobs
    when running in a kubernetes cluster on which kubeflow has been installed.
    Exactly one of 'tf_job' and 'transform' should be set
    """

    transform: "Transform" = betterproto.message_field(3)
    parallelism_spec: "ParallelismSpec" = betterproto.message_field(4)
    egress: "Egress" = betterproto.message_field(5)
    update: bool = betterproto.bool_field(6)
    output_branch: str = betterproto.string_field(7)
    s3_out: bool = betterproto.bool_field(8)
    """
    s3_out, if set, requires a pipeline's user to write to its output repo via
    Pachyderm's s3 gateway (if set, workers will serve Pachyderm's s3 gateway
    API at http://<pipeline>-s3.<namespace>/<job id>.out/my/file). In this mode
    /pfs/out won't be walked or uploaded, and the s3 gateway service in the
    workers will allow writes to the job's output commit
    """

    resource_requests: "ResourceSpec" = betterproto.message_field(9)
    resource_limits: "ResourceSpec" = betterproto.message_field(10)
    sidecar_resource_limits: "ResourceSpec" = betterproto.message_field(11)
    input: "Input" = betterproto.message_field(12)
    description: str = betterproto.string_field(13)
    reprocess: bool = betterproto.bool_field(15)
    """
    Reprocess forces the pipeline to reprocess all datums. It only has meaning
    if Update is true
    """

    service: "Service" = betterproto.message_field(17)
    spout: "Spout" = betterproto.message_field(18)
    datum_set_spec: "DatumSetSpec" = betterproto.message_field(19)
    datum_timeout: timedelta = betterproto.message_field(20)
    job_timeout: timedelta = betterproto.message_field(21)
    salt: str = betterproto.string_field(22)
    datum_tries: int = betterproto.int64_field(23)
    scheduling_spec: "SchedulingSpec" = betterproto.message_field(24)
    pod_spec: str = betterproto.string_field(25)
    pod_patch: str = betterproto.string_field(26)
    spec_commit: "_pfs__.Commit" = betterproto.message_field(27)
    metadata: "Metadata" = betterproto.message_field(28)
    reprocess_spec: str = betterproto.string_field(29)
    autoscaling: bool = betterproto.bool_field(30)
    tolerations: List["Toleration"] = betterproto.message_field(34)
    sidecar_resource_requests: "ResourceSpec" = betterproto.message_field(35)
    dry_run: bool = betterproto.bool_field(37)
    determined: "Determined" = betterproto.message_field(38)
    maximum_expected_uptime: timedelta = betterproto.message_field(39)


@dataclass(eq=False, repr=False)
class CreatePipelineV2Request(betterproto.Message):
    create_pipeline_request_json: str = betterproto.string_field(1)
    dry_run: bool = betterproto.bool_field(2)
    update: bool = betterproto.bool_field(3)
    reprocess: bool = betterproto.bool_field(4)


@dataclass(eq=False, repr=False)
class CreatePipelineV2Response(betterproto.Message):
    effective_create_pipeline_request_json: str = betterproto.string_field(1)


@dataclass(eq=False, repr=False)
class InspectPipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    details: bool = betterproto.bool_field(2)
    """
    When true, return PipelineInfos with the details field, which requires
    loading the pipeline spec from PFS.
    """


@dataclass(eq=False, repr=False)
class ListPipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    """
    If non-nil, only return info about a single pipeline, this is redundant
    with InspectPipeline unless history is non-zero.
    """

    history: int = betterproto.int64_field(2)
    """
    History indicates how many historical versions you want returned. Its
    semantics are: 0: Return the current version of the pipeline or pipelines.
    1: Return the above and the next most recent version 2: etc.-1: Return all
    historical versions.
    """

    details: bool = betterproto.bool_field(3)
    """Deprecated: Details are always returned."""

    jq_filter: str = betterproto.string_field(4)
    """A jq program string for additional result filtering"""

    commit_set: "_pfs__.CommitSet" = betterproto.message_field(5)
    """If non-nil, will return all the pipeline infos at this commit set"""

    projects: List["_pfs__.Project"] = betterproto.message_field(6)
    """
    Projects to filter on. Empty list means no filter, so return all pipelines.
    """

    def __post_init__(self) -> None:
        super().__post_init__()
        if self.is_set("details"):
            warnings.warn(
                "ListPipelineRequest.details is deprecated", DeprecationWarning
            )


@dataclass(eq=False, repr=False)
class DeletePipelineRequest(betterproto.Message):
    """
    Delete a pipeline.  If the deprecated all member is true, then delete all
    pipelines in the default project.
    """

    pipeline: "Pipeline" = betterproto.message_field(1)
    all: bool = betterproto.bool_field(2)
    force: bool = betterproto.bool_field(3)
    keep_repo: bool = betterproto.bool_field(4)
    must_exist: bool = betterproto.bool_field(5)
    """If true, an error will be returned if the pipeline doesn't exist."""

    def __post_init__(self) -> None:
        super().__post_init__()
        if self.is_set("all"):
            warnings.warn("DeletePipelineRequest.all is deprecated", DeprecationWarning)


@dataclass(eq=False, repr=False)
class DeletePipelinesRequest(betterproto.Message):
    """Delete more than one pipeline."""

    projects: List["_pfs__.Project"] = betterproto.message_field(1)
    """
    All pipelines in each project will be deleted if the caller has permission.
    """

    force: bool = betterproto.bool_field(2)
    keep_repo: bool = betterproto.bool_field(3)
    all: bool = betterproto.bool_field(4)
    """
    If set, all pipelines in all projects will be deleted if the caller has
    permission.
    """


@dataclass(eq=False, repr=False)
class DeletePipelinesResponse(betterproto.Message):
    pipelines: List["Pipeline"] = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class StartPipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class StopPipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    must_exist: bool = betterproto.bool_field(2)
    """If true, an error will be returned if the pipeline doesn't exist."""


@dataclass(eq=False, repr=False)
class RunPipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    provenance: List["_pfs__.Commit"] = betterproto.message_field(2)
    job_id: str = betterproto.string_field(3)


@dataclass(eq=False, repr=False)
class RunCronRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class CheckStatusRequest(betterproto.Message):
    """Request to check the status of pipelines within a project."""

    all: bool = betterproto.bool_field(1, group="context")
    """boolean field indicating status of all project pipelines."""

    project: "_pfs__.Project" = betterproto.message_field(2, group="context")
    """project field"""


@dataclass(eq=False, repr=False)
class CheckStatusResponse(betterproto.Message):
    """Response for check status request. Provides alerts if any."""

    project: "_pfs__.Project" = betterproto.message_field(1)
    """project field"""

    pipeline: "Pipeline" = betterproto.message_field(2)
    """pipeline field"""

    alerts: List[str] = betterproto.string_field(3)
    """alert indicators"""


@dataclass(eq=False, repr=False)
class CreateSecretRequest(betterproto.Message):
    file: bytes = betterproto.bytes_field(1)


@dataclass(eq=False, repr=False)
class DeleteSecretRequest(betterproto.Message):
    secret: "Secret" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class InspectSecretRequest(betterproto.Message):
    secret: "Secret" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class Secret(betterproto.Message):
    name: str = betterproto.string_field(1)


@dataclass(eq=False, repr=False)
class SecretInfo(betterproto.Message):
    secret: "Secret" = betterproto.message_field(1)
    type: str = betterproto.string_field(2)
    creation_timestamp: datetime = betterproto.message_field(3)


@dataclass(eq=False, repr=False)
class SecretInfos(betterproto.Message):
    secret_info: List["SecretInfo"] = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class ActivateAuthRequest(betterproto.Message):
    pass


@dataclass(eq=False, repr=False)
class ActivateAuthResponse(betterproto.Message):
    pass


@dataclass(eq=False, repr=False)
class RunLoadTestRequest(betterproto.Message):
    dag_spec: str = betterproto.string_field(1)
    load_spec: str = betterproto.string_field(2)
    seed: int = betterproto.int64_field(3)
    parallelism: int = betterproto.int64_field(4)
    pod_patch: str = betterproto.string_field(5)
    state_id: str = betterproto.string_field(6)


@dataclass(eq=False, repr=False)
class RunLoadTestResponse(betterproto.Message):
    error: str = betterproto.string_field(1)
    state_id: str = betterproto.string_field(2)


@dataclass(eq=False, repr=False)
class RenderTemplateRequest(betterproto.Message):
    template: str = betterproto.string_field(1)
    args: Dict[str, str] = betterproto.map_field(
        2, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    )


@dataclass(eq=False, repr=False)
class RenderTemplateResponse(betterproto.Message):
    json: str = betterproto.string_field(1)
    specs: List["CreatePipelineRequest"] = betterproto.message_field(2)


@dataclass(eq=False, repr=False)
class LokiRequest(betterproto.Message):
    since: timedelta = betterproto.message_field(1)
    query: str = betterproto.string_field(2)


@dataclass(eq=False, repr=False)
class LokiLogMessage(betterproto.Message):
    message: str = betterproto.string_field(1)


@dataclass(eq=False, repr=False)
class ClusterDefaults(betterproto.Message):
    create_pipeline_request: "CreatePipelineRequest" = betterproto.message_field(3)
    """
    CreatePipelineRequest contains the default JSON CreatePipelineRequest into
    which pipeline specs are merged to form the effective spec used to create a
    pipeline.
    """


@dataclass(eq=False, repr=False)
class GetClusterDefaultsRequest(betterproto.Message):
    pass


@dataclass(eq=False, repr=False)
class GetClusterDefaultsResponse(betterproto.Message):
    cluster_defaults_json: str = betterproto.string_field(2)
    """
    A JSON-encoded ClusterDefaults message, this is the verbatim input passed
    to SetClusterDefaults.
    """


@dataclass(eq=False, repr=False)
class SetClusterDefaultsRequest(betterproto.Message):
    regenerate: bool = betterproto.bool_field(2)
    reprocess: bool = betterproto.bool_field(3)
    dry_run: bool = betterproto.bool_field(4)
    cluster_defaults_json: str = betterproto.string_field(5)
    """
    A JSON-encoded ClusterDefaults message, this will be stored verbatim.
    """


@dataclass(eq=False, repr=False)
class SetClusterDefaultsResponse(betterproto.Message):
    affected_pipelines: List["Pipeline"] = betterproto.message_field(2)


@dataclass(eq=False, repr=False)
class CreatePipelineTransaction(betterproto.Message):
    create_pipeline_request: "CreatePipelineRequest" = betterproto.message_field(1)
    user_json: str = betterproto.string_field(2)
    effective_json: str = betterproto.string_field(3)


@dataclass(eq=False, repr=False)
class ProjectDefaults(betterproto.Message):
    create_pipeline_request: "CreatePipelineRequest" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class GetProjectDefaultsRequest(betterproto.Message):
    project: "_pfs__.Project" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class GetProjectDefaultsResponse(betterproto.Message):
    project_defaults_json: str = betterproto.string_field(1)
    """
    A JSON-encoded ProjectDefaults message, this is the verbatim input passed
    to SetProjectDefaults.
    """


@dataclass(eq=False, repr=False)
class SetProjectDefaultsRequest(betterproto.Message):
    project: "_pfs__.Project" = betterproto.message_field(1)
    regenerate: bool = betterproto.bool_field(2)
    reprocess: bool = betterproto.bool_field(3)
    dry_run: bool = betterproto.bool_field(4)
    project_defaults_json: str = betterproto.string_field(5)
    """
    A JSON-encoded ProjectDefaults message, this will be stored verbatim.
    """


@dataclass(eq=False, repr=False)
class SetProjectDefaultsResponse(betterproto.Message):
    affected_pipelines: List["Pipeline"] = betterproto.message_field(1)


class ApiStub:

    def __init__(self, channel: "grpc.Channel"):
        self.__rpc_inspect_job = channel.unary_unary(
            "/pps_v2.API/InspectJob",
            request_serializer=InspectJobRequest.SerializeToString,
            response_deserializer=JobInfo.FromString,
        )
        self.__rpc_inspect_job_set = channel.unary_stream(
            "/pps_v2.API/InspectJobSet",
            request_serializer=InspectJobSetRequest.SerializeToString,
            response_deserializer=JobInfo.FromString,
        )
        self.__rpc_list_job = channel.unary_stream(
            "/pps_v2.API/ListJob",
            request_serializer=ListJobRequest.SerializeToString,
            response_deserializer=JobInfo.FromString,
        )
        self.__rpc_list_job_set = channel.unary_stream(
            "/pps_v2.API/ListJobSet",
            request_serializer=ListJobSetRequest.SerializeToString,
            response_deserializer=JobSetInfo.FromString,
        )
        self.__rpc_subscribe_job = channel.unary_stream(
            "/pps_v2.API/SubscribeJob",
            request_serializer=SubscribeJobRequest.SerializeToString,
            response_deserializer=JobInfo.FromString,
        )
        self.__rpc_delete_job = channel.unary_unary(
            "/pps_v2.API/DeleteJob",
            request_serializer=DeleteJobRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_stop_job = channel.unary_unary(
            "/pps_v2.API/StopJob",
            request_serializer=StopJobRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_inspect_datum = channel.unary_unary(
            "/pps_v2.API/InspectDatum",
            request_serializer=InspectDatumRequest.SerializeToString,
            response_deserializer=DatumInfo.FromString,
        )
        self.__rpc_list_datum = channel.unary_stream(
            "/pps_v2.API/ListDatum",
            request_serializer=ListDatumRequest.SerializeToString,
            response_deserializer=DatumInfo.FromString,
        )
        self.__rpc_create_datum = channel.stream_stream(
            "/pps_v2.API/CreateDatum",
            request_serializer=CreateDatumRequest.SerializeToString,
            response_deserializer=DatumInfo.FromString,
        )
        self.__rpc_restart_datum = channel.unary_unary(
            "/pps_v2.API/RestartDatum",
            request_serializer=RestartDatumRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_rerun_pipeline = channel.unary_unary(
            "/pps_v2.API/RerunPipeline",
            request_serializer=RerunPipelineRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_create_pipeline = channel.unary_unary(
            "/pps_v2.API/CreatePipeline",
            request_serializer=CreatePipelineRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_create_pipeline_v2 = channel.unary_unary(
            "/pps_v2.API/CreatePipelineV2",
            request_serializer=CreatePipelineV2Request.SerializeToString,
            response_deserializer=CreatePipelineV2Response.FromString,
        )
        self.__rpc_inspect_pipeline = channel.unary_unary(
            "/pps_v2.API/InspectPipeline",
            request_serializer=InspectPipelineRequest.SerializeToString,
            response_deserializer=PipelineInfo.FromString,
        )
        self.__rpc_list_pipeline = channel.unary_stream(
            "/pps_v2.API/ListPipeline",
            request_serializer=ListPipelineRequest.SerializeToString,
            response_deserializer=PipelineInfo.FromString,
        )
        self.__rpc_delete_pipeline = channel.unary_unary(
            "/pps_v2.API/DeletePipeline",
            request_serializer=DeletePipelineRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_delete_pipelines = channel.unary_unary(
            "/pps_v2.API/DeletePipelines",
            request_serializer=DeletePipelinesRequest.SerializeToString,
            response_deserializer=DeletePipelinesResponse.FromString,
        )
        self.__rpc_start_pipeline = channel.unary_unary(
            "/pps_v2.API/StartPipeline",
            request_serializer=StartPipelineRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_stop_pipeline = channel.unary_unary(
            "/pps_v2.API/StopPipeline",
            request_serializer=StopPipelineRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_run_pipeline = channel.unary_unary(
            "/pps_v2.API/RunPipeline",
            request_serializer=RunPipelineRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_run_cron = channel.unary_unary(
            "/pps_v2.API/RunCron",
            request_serializer=RunCronRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_check_status = channel.unary_stream(
            "/pps_v2.API/CheckStatus",
            request_serializer=CheckStatusRequest.SerializeToString,
            response_deserializer=CheckStatusResponse.FromString,
        )
        self.__rpc_create_secret = channel.unary_unary(
            "/pps_v2.API/CreateSecret",
            request_serializer=CreateSecretRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_delete_secret = channel.unary_unary(
            "/pps_v2.API/DeleteSecret",
            request_serializer=DeleteSecretRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_list_secret = channel.unary_unary(
            "/pps_v2.API/ListSecret",
            request_serializer=betterproto_lib_google_protobuf.Empty.SerializeToString,
            response_deserializer=SecretInfos.FromString,
        )
        self.__rpc_inspect_secret = channel.unary_unary(
            "/pps_v2.API/InspectSecret",
            request_serializer=InspectSecretRequest.SerializeToString,
            response_deserializer=SecretInfo.FromString,
        )
        self.__rpc_delete_all = channel.unary_unary(
            "/pps_v2.API/DeleteAll",
            request_serializer=betterproto_lib_google_protobuf.Empty.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_get_logs = channel.unary_stream(
            "/pps_v2.API/GetLogs",
            request_serializer=GetLogsRequest.SerializeToString,
            response_deserializer=LogMessage.FromString,
        )
        self.__rpc_activate_auth = channel.unary_unary(
            "/pps_v2.API/ActivateAuth",
            request_serializer=ActivateAuthRequest.SerializeToString,
            response_deserializer=ActivateAuthResponse.FromString,
        )
        self.__rpc_update_job_state = channel.unary_unary(
            "/pps_v2.API/UpdateJobState",
            request_serializer=UpdateJobStateRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_run_load_test = channel.unary_unary(
            "/pps_v2.API/RunLoadTest",
            request_serializer=RunLoadTestRequest.SerializeToString,
            response_deserializer=RunLoadTestResponse.FromString,
        )
        self.__rpc_run_load_test_default = channel.unary_unary(
            "/pps_v2.API/RunLoadTestDefault",
            request_serializer=betterproto_lib_google_protobuf.Empty.SerializeToString,
            response_deserializer=RunLoadTestResponse.FromString,
        )
        self.__rpc_render_template = channel.unary_unary(
            "/pps_v2.API/RenderTemplate",
            request_serializer=RenderTemplateRequest.SerializeToString,
            response_deserializer=RenderTemplateResponse.FromString,
        )
        self.__rpc_list_task = channel.unary_stream(
            "/pps_v2.API/ListTask",
            request_serializer=_taskapi__.ListTaskRequest.SerializeToString,
            response_deserializer=_taskapi__.TaskInfo.FromString,
        )
        self.__rpc_get_kube_events = channel.unary_stream(
            "/pps_v2.API/GetKubeEvents",
            request_serializer=LokiRequest.SerializeToString,
            response_deserializer=LokiLogMessage.FromString,
        )
        self.__rpc_query_loki = channel.unary_stream(
            "/pps_v2.API/QueryLoki",
            request_serializer=LokiRequest.SerializeToString,
            response_deserializer=LokiLogMessage.FromString,
        )
        self.__rpc_get_cluster_defaults = channel.unary_unary(
            "/pps_v2.API/GetClusterDefaults",
            request_serializer=GetClusterDefaultsRequest.SerializeToString,
            response_deserializer=GetClusterDefaultsResponse.FromString,
        )
        self.__rpc_set_cluster_defaults = channel.unary_unary(
            "/pps_v2.API/SetClusterDefaults",
            request_serializer=SetClusterDefaultsRequest.SerializeToString,
            response_deserializer=SetClusterDefaultsResponse.FromString,
        )
        self.__rpc_get_project_defaults = channel.unary_unary(
            "/pps_v2.API/GetProjectDefaults",
            request_serializer=GetProjectDefaultsRequest.SerializeToString,
            response_deserializer=GetProjectDefaultsResponse.FromString,
        )
        self.__rpc_set_project_defaults = channel.unary_unary(
            "/pps_v2.API/SetProjectDefaults",
            request_serializer=SetProjectDefaultsRequest.SerializeToString,
            response_deserializer=SetProjectDefaultsResponse.FromString,
        )

    def inspect_job(
        self, *, job: "Job" = None, wait: bool = False, details: bool = False
    ) -> "JobInfo":

        request = InspectJobRequest()
        if job is not None:
            request.job = job
        request.wait = wait
        request.details = details

        return self.__rpc_inspect_job(request)
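
    # Usage sketch (comments only; the job ID is a placeholder):
    #   info = stub.inspect_job(
    #       job=Job(pipeline=Pipeline(name="edges"), id="<job-id>"),
    #       wait=True,
    #   )
    #   print(info.state)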

    def inspect_job_set(
        self, *, job_set: "JobSet" = None, wait: bool = False, details: bool = False
    ) -> Iterator["JobInfo"]:

        request = InspectJobSetRequest()
        if job_set is not None:
            request.job_set = job_set
        request.wait = wait
        request.details = details

        for response in self.__rpc_inspect_job_set(request):
            yield response

    def list_job(
        self,
        *,
        projects: Optional[List["_pfs__.Project"]] = None,
        pipeline: "Pipeline" = None,
        input_commit: Optional[List["_pfs__.Commit"]] = None,
        history: int = 0,
        details: bool = False,
        jq_filter: str = "",
        pagination_marker: datetime = None,
        number: int = 0,
        reverse: bool = False
    ) -> Iterator["JobInfo"]:
        projects = projects or []
        input_commit = input_commit or []

        request = ListJobRequest()
        if projects is not None:
            request.projects = projects
        if pipeline is not None:
            request.pipeline = pipeline
        if input_commit is not None:
            request.input_commit = input_commit
        request.history = history
        request.details = details
        request.jq_filter = jq_filter
        if pagination_marker is not None:
            request.pagination_marker = pagination_marker
        request.number = number
        request.reverse = reverse

        for response in self.__rpc_list_job(request):
            yield response

    def list_job_set(
        self,
        *,
        details: bool = False,
        projects: Optional[List["_pfs__.Project"]] = None,
        pagination_marker: datetime = None,
        number: int = 0,
        reverse: bool = False,
        jq_filter: str = ""
    ) -> Iterator["JobSetInfo"]:
        projects = projects or []

        request = ListJobSetRequest()
        request.details = details
        if projects is not None:
            request.projects = projects
        if pagination_marker is not None:
            request.pagination_marker = pagination_marker
        request.number = number
        request.reverse = reverse
        request.jq_filter = jq_filter

        for response in self.__rpc_list_job_set(request):
            yield response

    def subscribe_job(
        self, *, pipeline: "Pipeline" = None, details: bool = False
    ) -> Iterator["JobInfo"]:

        request = SubscribeJobRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        request.details = details

        for response in self.__rpc_subscribe_job(request):
            yield response

    def delete_job(
        self, *, job: "Job" = None
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = DeleteJobRequest()
        if job is not None:
            request.job = job

        return self.__rpc_delete_job(request)

    def stop_job(
        self, *, job: "Job" = None, reason: str = ""
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = StopJobRequest()
        if job is not None:
            request.job = job
        request.reason = reason

        return self.__rpc_stop_job(request)

    def inspect_datum(self, *, datum: "Datum" = None) -> "DatumInfo":

        request = InspectDatumRequest()
        if datum is not None:
            request.datum = datum

        return self.__rpc_inspect_datum(request)

    def list_datum(
        self,
        *,
        job: "Job" = None,
        input: "Input" = None,
        filter: "ListDatumRequestFilter" = None,
        pagination_marker: str = "",
        number: int = 0,
        reverse: bool = False
    ) -> Iterator["DatumInfo"]:

        request = ListDatumRequest()
        if job is not None:
            request.job = job
        if input is not None:
            request.input = input
        if filter is not None:
            request.filter = filter
        request.pagination_marker = pagination_marker
        request.number = number
        request.reverse = reverse

        for response in self.__rpc_list_datum(request):
            yield response

    def create_datum(
        self,
        request_iterator: Union[
            AsyncIterable["CreateDatumRequest"], Iterable["CreateDatumRequest"]
        ],
    ) -> Iterator["DatumInfo"]:

        for response in self.__rpc_create_datum(request_iterator):
            yield response

    def restart_datum(
        self, *, job: "Job" = None, data_filters: Optional[List[str]] = None
    ) -> "betterproto_lib_google_protobuf.Empty":
        data_filters = data_filters or []

        request = RestartDatumRequest()
        if job is not None:
            request.job = job
        request.data_filters = data_filters

        return self.__rpc_restart_datum(request)

    def rerun_pipeline(
        self, *, pipeline: "Pipeline" = None, reprocess: bool = False
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = RerunPipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        request.reprocess = reprocess

        return self.__rpc_rerun_pipeline(request)

    def create_pipeline(
        self,
        *,
        pipeline: "Pipeline" = None,
        tf_job: "TfJob" = None,
        transform: "Transform" = None,
        parallelism_spec: "ParallelismSpec" = None,
        egress: "Egress" = None,
        update: bool = False,
        output_branch: str = "",
        s3_out: bool = False,
        resource_requests: "ResourceSpec" = None,
        resource_limits: "ResourceSpec" = None,
        sidecar_resource_limits: "ResourceSpec" = None,
        input: "Input" = None,
        description: str = "",
        reprocess: bool = False,
        service: "Service" = None,
        spout: "Spout" = None,
        datum_set_spec: "DatumSetSpec" = None,
        datum_timeout: timedelta = None,
        job_timeout: timedelta = None,
        salt: str = "",
        datum_tries: int = 0,
        scheduling_spec: "SchedulingSpec" = None,
        pod_spec: str = "",
        pod_patch: str = "",
        spec_commit: "_pfs__.Commit" = None,
        metadata: "Metadata" = None,
        reprocess_spec: str = "",
        autoscaling: bool = False,
        tolerations: Optional[List["Toleration"]] = None,
        sidecar_resource_requests: "ResourceSpec" = None,
        dry_run: bool = False,
        determined: "Determined" = None,
        maximum_expected_uptime: timedelta = None
    ) -> "betterproto_lib_google_protobuf.Empty":
        tolerations = tolerations or []

        request = CreatePipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        if tf_job is not None:
            request.tf_job = tf_job
        if transform is not None:
            request.transform = transform
        if parallelism_spec is not None:
            request.parallelism_spec = parallelism_spec
        if egress is not None:
            request.egress = egress
        request.update = update
        request.output_branch = output_branch
        request.s3_out = s3_out
        if resource_requests is not None:
            request.resource_requests = resource_requests
        if resource_limits is not None:
            request.resource_limits = resource_limits
        if sidecar_resource_limits is not None:
            request.sidecar_resource_limits = sidecar_resource_limits
        if input is not None:
            request.input = input
        request.description = description
        request.reprocess = reprocess
        if service is not None:
            request.service = service
        if spout is not None:
            request.spout = spout
        if datum_set_spec is not None:
            request.datum_set_spec = datum_set_spec
        if datum_timeout is not None:
            request.datum_timeout = datum_timeout
        if job_timeout is not None:
            request.job_timeout = job_timeout
        request.salt = salt
        request.datum_tries = datum_tries
        if scheduling_spec is not None:
            request.scheduling_spec = scheduling_spec
        request.pod_spec = pod_spec
        request.pod_patch = pod_patch
        if spec_commit is not None:
            request.spec_commit = spec_commit
        if metadata is not None:
            request.metadata = metadata
        request.reprocess_spec = reprocess_spec
        request.autoscaling = autoscaling
        if tolerations is not None:
            request.tolerations = tolerations
        if sidecar_resource_requests is not None:
            request.sidecar_resource_requests = sidecar_resource_requests
        request.dry_run = dry_run
        if determined is not None:
            request.determined = determined
        if maximum_expected_uptime is not None:
            request.maximum_expected_uptime = maximum_expected_uptime

        return self.__rpc_create_pipeline(request)
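
    # Editor's sketch (not generated code): the smallest useful call passes a
    # pipeline name, a transform, and an input. The names, image, and repo are
    # illustrative; Input.pfs wrapping a PfsInput follows this module's Input
    # message.
    #
    #   stub.create_pipeline(
    #       pipeline=Pipeline(name="edges"),
    #       transform=Transform(
    #           image="pachyderm/opencv:1.0", cmd=["python3", "/edges.py"]
    #       ),
    #       input=Input(pfs=PfsInput(repo="images", glob="/*")),
    #   )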

    def create_pipeline_v2(
        self,
        *,
        create_pipeline_request_json: str = "",
        dry_run: bool = False,
        update: bool = False,
        reprocess: bool = False
    ) -> "CreatePipelineV2Response":

        request = CreatePipelineV2Request()
        request.create_pipeline_request_json = create_pipeline_request_json
        request.dry_run = dry_run
        request.update = update
        request.reprocess = reprocess

        return self.__rpc_create_pipeline_v2(request)

    def inspect_pipeline(
        self, *, pipeline: "Pipeline" = None, details: bool = False
    ) -> "PipelineInfo":

        request = InspectPipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        request.details = details

        return self.__rpc_inspect_pipeline(request)

    def list_pipeline(
        self,
        *,
        pipeline: "Pipeline" = None,
        history: int = 0,
        details: bool = False,
        jq_filter: str = "",
        commit_set: "_pfs__.CommitSet" = None,
        projects: Optional[List["_pfs__.Project"]] = None
    ) -> Iterator["PipelineInfo"]:
        projects = projects or []

        request = ListPipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        request.history = history
        request.details = details
        request.jq_filter = jq_filter
        if commit_set is not None:
            request.commit_set = commit_set
        if projects is not None:
            request.projects = projects

        for response in self.__rpc_list_pipeline(request):
            yield response

    def delete_pipeline(
        self,
        *,
        pipeline: "Pipeline" = None,
        all: bool = False,
        force: bool = False,
        keep_repo: bool = False,
        must_exist: bool = False
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = DeletePipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        request.all = all
        request.force = force
        request.keep_repo = keep_repo
        request.must_exist = must_exist

        return self.__rpc_delete_pipeline(request)

    def delete_pipelines(
        self,
        *,
        projects: Optional[List["_pfs__.Project"]] = None,
        force: bool = False,
        keep_repo: bool = False,
        all: bool = False
    ) -> "DeletePipelinesResponse":
        projects = projects or []

        request = DeletePipelinesRequest()
        if projects is not None:
            request.projects = projects
        request.force = force
        request.keep_repo = keep_repo
        request.all = all

        return self.__rpc_delete_pipelines(request)

    def start_pipeline(
        self, *, pipeline: "Pipeline" = None
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = StartPipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline

        return self.__rpc_start_pipeline(request)

    def stop_pipeline(
        self, *, pipeline: "Pipeline" = None, must_exist: bool = False
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = StopPipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        request.must_exist = must_exist

        return self.__rpc_stop_pipeline(request)

    def run_pipeline(
        self,
        *,
        pipeline: "Pipeline" = None,
        provenance: Optional[List["_pfs__.Commit"]] = None,
        job_id: str = ""
    ) -> "betterproto_lib_google_protobuf.Empty":
        provenance = provenance or []

        request = RunPipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        if provenance is not None:
            request.provenance = provenance
        request.job_id = job_id

        return self.__rpc_run_pipeline(request)

    def run_cron(
        self, *, pipeline: "Pipeline" = None
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = RunCronRequest()
        if pipeline is not None:
            request.pipeline = pipeline

        return self.__rpc_run_cron(request)

    def check_status(
        self, *, all: bool = False, project: "_pfs__.Project" = None
    ) -> Iterator["CheckStatusResponse"]:

        request = CheckStatusRequest()
        request.all = all
        if project is not None:
            request.project = project

        for response in self.__rpc_check_status(request):
            yield response

    def create_secret(
        self, *, file: bytes = b""
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = CreateSecretRequest()
        request.file = file

        return self.__rpc_create_secret(request)

    def delete_secret(
        self, *, secret: "Secret" = None
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = DeleteSecretRequest()
        if secret is not None:
            request.secret = secret

        return self.__rpc_delete_secret(request)

    def list_secret(self) -> "SecretInfos":

        request = betterproto_lib_google_protobuf.Empty()

        return self.__rpc_list_secret(request)

    def inspect_secret(self, *, secret: "Secret" = None) -> "SecretInfo":

        request = InspectSecretRequest()
        if secret is not None:
            request.secret = secret

        return self.__rpc_inspect_secret(request)

    def delete_all(self) -> "betterproto_lib_google_protobuf.Empty":

        request = betterproto_lib_google_protobuf.Empty()

        return self.__rpc_delete_all(request)

    def get_logs(
        self,
        *,
        pipeline: "Pipeline" = None,
        job: "Job" = None,
        data_filters: Optional[List[str]] = None,
        datum: "Datum" = None,
        master: bool = False,
        follow: bool = False,
        tail: int = 0,
        use_loki_backend: bool = False,
        since: timedelta = None
    ) -> Iterator["LogMessage"]:
        data_filters = data_filters or []

        request = GetLogsRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        if job is not None:
            request.job = job
        request.data_filters = data_filters
        if datum is not None:
            request.datum = datum
        request.master = master
        request.follow = follow
        request.tail = tail
        request.use_loki_backend = use_loki_backend
        if since is not None:
            request.since = since

        for response in self.__rpc_get_logs(request):
            yield response

    def activate_auth(self) -> "ActivateAuthResponse":

        request = ActivateAuthRequest()

        return self.__rpc_activate_auth(request)

    def update_job_state(
        self,
        *,
        job: "Job" = None,
        state: "JobState" = None,
        reason: str = "",
        restart: int = 0,
        data_processed: int = 0,
        data_skipped: int = 0,
        data_failed: int = 0,
        data_recovered: int = 0,
        data_total: int = 0,
        stats: "ProcessStats" = None
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = UpdateJobStateRequest()
        if job is not None:
            request.job = job
        request.state = state
        request.reason = reason
        request.restart = restart
        request.data_processed = data_processed
        request.data_skipped = data_skipped
        request.data_failed = data_failed
        request.data_recovered = data_recovered
        request.data_total = data_total
        if stats is not None:
            request.stats = stats

        return self.__rpc_update_job_state(request)

    def run_load_test(
        self,
        *,
        dag_spec: str = "",
        load_spec: str = "",
        seed: int = 0,
        parallelism: int = 0,
        pod_patch: str = "",
        state_id: str = ""
    ) -> "RunLoadTestResponse":

        request = RunLoadTestRequest()
        request.dag_spec = dag_spec
        request.load_spec = load_spec
        request.seed = seed
        request.parallelism = parallelism
        request.pod_patch = pod_patch
        request.state_id = state_id

        return self.__rpc_run_load_test(request)

    def run_load_test_default(self) -> "RunLoadTestResponse":

        request = betterproto_lib_google_protobuf.Empty()

        return self.__rpc_run_load_test_default(request)

    def render_template(
        self, *, template: str = "", args: Dict[str, str] = None
    ) -> "RenderTemplateResponse":

        request = RenderTemplateRequest()
        request.template = template
        request.args = args

        return self.__rpc_render_template(request)

    def list_task(self, *, group: "Group" = None) -> Iterator["_taskapi__.TaskInfo"]:

        request = _taskapi__.ListTaskRequest()
        if group is not None:
            request.group = group

        for response in self.__rpc_list_task(request):
            yield response

    def get_kube_events(
        self, *, since: timedelta = None, query: str = ""
    ) -> Iterator["LokiLogMessage"]:

        request = LokiRequest()
        if since is not None:
            request.since = since
        request.query = query

        for response in self.__rpc_get_kube_events(request):
            yield response

    def query_loki(
        self, *, since: timedelta = None, query: str = ""
    ) -> Iterator["LokiLogMessage"]:

        request = LokiRequest()
        if since is not None:
            request.since = since
        request.query = query

        for response in self.__rpc_query_loki(request):
            yield response

    def get_cluster_defaults(self) -> "GetClusterDefaultsResponse":

        request = GetClusterDefaultsRequest()

        return self.__rpc_get_cluster_defaults(request)

    def set_cluster_defaults(
        self,
        *,
        regenerate: bool = False,
        reprocess: bool = False,
        dry_run: bool = False,
        cluster_defaults_json: str = ""
    ) -> "SetClusterDefaultsResponse":

        request = SetClusterDefaultsRequest()
        request.regenerate = regenerate
        request.reprocess = reprocess
        request.dry_run = dry_run
        request.cluster_defaults_json = cluster_defaults_json

        return self.__rpc_set_cluster_defaults(request)

    def get_project_defaults(
        self, *, project: "_pfs__.Project" = None
    ) -> "GetProjectDefaultsResponse":

        request = GetProjectDefaultsRequest()
        if project is not None:
            request.project = project

        return self.__rpc_get_project_defaults(request)

    def set_project_defaults(
        self,
        *,
        project: "_pfs__.Project" = None,
        regenerate: bool = False,
        reprocess: bool = False,
        dry_run: bool = False,
        project_defaults_json: str = ""
    ) -> "SetProjectDefaultsResponse":

        request = SetProjectDefaultsRequest()
        if project is not None:
            request.project = project
        request.regenerate = regenerate
        request.reprocess = reprocess
        request.dry_run = dry_run
        request.project_defaults_json = project_defaults_json

        return self.__rpc_set_project_defaults(request)
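
    # Editor's sketch (not generated code): using this stub directly. It
    # assumes pachd is reachable at localhost:30650, a pipeline named "edges",
    # and that the stub class is ApiStub (betterproto's convention for a
    # service named "API"); most callers reach these methods through
    # pachyderm_sdk.Client instead.
    #
    #   import grpc
    #   from pachyderm_sdk.api.pps import ApiStub, Pipeline
    #
    #   stub = ApiStub(grpc.insecure_channel("localhost:30650"))
    #   info = stub.inspect_pipeline(pipeline=Pipeline(name="edges"), details=True)
    #   for job_info in stub.list_job(pipeline=Pipeline(name="edges"), number=10):
    #       print(job_info.job.id, job_info.state)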


class ApiBase:

    def inspect_job(
        self, job: "Job", wait: bool, details: bool, context: "grpc.ServicerContext"
    ) -> "JobInfo":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def inspect_job_set(
        self,
        job_set: "JobSet",
        wait: bool,
        details: bool,
        context: "grpc.ServicerContext",
    ) -> Iterator["JobInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def list_job(
        self,
        projects: Optional[List["_pfs__.Project"]],
        pipeline: "Pipeline",
        input_commit: Optional[List["_pfs__.Commit"]],
        history: int,
        details: bool,
        jq_filter: str,
        pagination_marker: datetime,
        number: int,
        reverse: bool,
        context: "grpc.ServicerContext",
    ) -> Iterator["JobInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def list_job_set(
        self,
        details: bool,
        projects: Optional[List["_pfs__.Project"]],
        pagination_marker: datetime,
        number: int,
        reverse: bool,
        jq_filter: str,
        context: "grpc.ServicerContext",
    ) -> Iterator["JobSetInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def subscribe_job(
        self, pipeline: "Pipeline", details: bool, context: "grpc.ServicerContext"
    ) -> Iterator["JobInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def delete_job(
        self, job: "Job", context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def stop_job(
        self, job: "Job", reason: str, context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def inspect_datum(
        self, datum: "Datum", context: "grpc.ServicerContext"
    ) -> "DatumInfo":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def list_datum(
        self,
        job: "Job",
        input: "Input",
        filter: "ListDatumRequestFilter",
        pagination_marker: str,
        number: int,
        reverse: bool,
        context: "grpc.ServicerContext",
    ) -> Iterator["DatumInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def create_datum(
        self,
        request_iterator: Iterator["CreateDatumRequest"],
        context: "grpc.ServicerContext",
    ) -> Iterator["DatumInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def restart_datum(
        self,
        job: "Job",
        data_filters: Optional[List[str]],
        context: "grpc.ServicerContext",
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def rerun_pipeline(
        self, pipeline: "Pipeline", reprocess: bool, context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def create_pipeline(
        self,
        pipeline: "Pipeline",
        tf_job: "TfJob",
        transform: "Transform",
        parallelism_spec: "ParallelismSpec",
        egress: "Egress",
        update: bool,
        output_branch: str,
        s3_out: bool,
        resource_requests: "ResourceSpec",
        resource_limits: "ResourceSpec",
        sidecar_resource_limits: "ResourceSpec",
        input: "Input",
        description: str,
        reprocess: bool,
        service: "Service",
        spout: "Spout",
        datum_set_spec: "DatumSetSpec",
        datum_timeout: timedelta,
        job_timeout: timedelta,
        salt: str,
        datum_tries: int,
        scheduling_spec: "SchedulingSpec",
        pod_spec: str,
        pod_patch: str,
        spec_commit: "_pfs__.Commit",
        metadata: "Metadata",
        reprocess_spec: str,
        autoscaling: bool,
        tolerations: Optional[List["Toleration"]],
        sidecar_resource_requests: "ResourceSpec",
        dry_run: bool,
        determined: "Determined",
        maximum_expected_uptime: timedelta,
        context: "grpc.ServicerContext",
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def create_pipeline_v2(
        self,
        create_pipeline_request_json: str,
        dry_run: bool,
        update: bool,
        reprocess: bool,
        context: "grpc.ServicerContext",
    ) -> "CreatePipelineV2Response":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def inspect_pipeline(
        self, pipeline: "Pipeline", details: bool, context: "grpc.ServicerContext"
    ) -> "PipelineInfo":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def list_pipeline(
        self,
        pipeline: "Pipeline",
        history: int,
        details: bool,
        jq_filter: str,
        commit_set: "_pfs__.CommitSet",
        projects: Optional[List["_pfs__.Project"]],
        context: "grpc.ServicerContext",
    ) -> Iterator["PipelineInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def delete_pipeline(
        self,
        pipeline: "Pipeline",
        all: bool,
        force: bool,
        keep_repo: bool,
        must_exist: bool,
        context: "grpc.ServicerContext",
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def delete_pipelines(
        self,
        projects: Optional[List["_pfs__.Project"]],
        force: bool,
        keep_repo: bool,
        all: bool,
        context: "grpc.ServicerContext",
    ) -> "DeletePipelinesResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def start_pipeline(
        self, pipeline: "Pipeline", context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def stop_pipeline(
        self, pipeline: "Pipeline", must_exist: bool, context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def run_pipeline(
        self,
        pipeline: "Pipeline",
        provenance: Optional[List["_pfs__.Commit"]],
        job_id: str,
        context: "grpc.ServicerContext",
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def run_cron(
        self, pipeline: "Pipeline", context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def check_status(
        self, all: bool, project: "_pfs__.Project", context: "grpc.ServicerContext"
    ) -> Iterator["CheckStatusResponse"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def create_secret(
        self, file: bytes, context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def delete_secret(
        self, secret: "Secret", context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def list_secret(self, context: "grpc.ServicerContext") -> "SecretInfos":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def inspect_secret(
        self, secret: "Secret", context: "grpc.ServicerContext"
    ) -> "SecretInfo":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def delete_all(
        self, context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def get_logs(
        self,
        pipeline: "Pipeline",
        job: "Job",
        data_filters: Optional[List[str]],
        datum: "Datum",
        master: bool,
        follow: bool,
        tail: int,
        use_loki_backend: bool,
        since: timedelta,
        context: "grpc.ServicerContext",
    ) -> Iterator["LogMessage"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def activate_auth(self, context: "grpc.ServicerContext") -> "ActivateAuthResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def update_job_state(
        self,
        job: "Job",
        state: "JobState",
        reason: str,
        restart: int,
        data_processed: int,
        data_skipped: int,
        data_failed: int,
        data_recovered: int,
        data_total: int,
        stats: "ProcessStats",
        context: "grpc.ServicerContext",
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def run_load_test(
        self,
        dag_spec: str,
        load_spec: str,
        seed: int,
        parallelism: int,
        pod_patch: str,
        state_id: str,
        context: "grpc.ServicerContext",
    ) -> "RunLoadTestResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def run_load_test_default(
        self, context: "grpc.ServicerContext"
    ) -> "RunLoadTestResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def render_template(
        self, template: str, args: Dict[str, str], context: "grpc.ServicerContext"
    ) -> "RenderTemplateResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def list_task(
        self, group: "Group", context: "grpc.ServicerContext"
    ) -> Iterator["_taskapi__.TaskInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def get_kube_events(
        self, since: timedelta, query: str, context: "grpc.ServicerContext"
    ) -> Iterator["LokiLogMessage"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def query_loki(
        self, since: timedelta, query: str, context: "grpc.ServicerContext"
    ) -> Iterator["LokiLogMessage"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def get_cluster_defaults(
        self, context: "grpc.ServicerContext"
    ) -> "GetClusterDefaultsResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def set_cluster_defaults(
        self,
        regenerate: bool,
        reprocess: bool,
        dry_run: bool,
        cluster_defaults_json: str,
        context: "grpc.ServicerContext",
    ) -> "SetClusterDefaultsResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def get_project_defaults(
        self, project: "_pfs__.Project", context: "grpc.ServicerContext"
    ) -> "GetProjectDefaultsResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def set_project_defaults(
        self,
        project: "_pfs__.Project",
        regenerate: bool,
        reprocess: bool,
        dry_run: bool,
        project_defaults_json: str,
        context: "grpc.ServicerContext",
    ) -> "SetProjectDefaultsResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    __proto_path__ = "pps_v2.API"

    @property
    def __rpc_methods__(self):
        return {
            "InspectJob": grpc.unary_unary_rpc_method_handler(
                self.inspect_job,
                request_deserializer=InspectJobRequest.FromString,
                response_serializer=InspectJobRequest.SerializeToString,
            ),
            "InspectJobSet": grpc.unary_stream_rpc_method_handler(
                self.inspect_job_set,
                request_deserializer=InspectJobSetRequest.FromString,
                response_serializer=InspectJobSetRequest.SerializeToString,
            ),
            "ListJob": grpc.unary_stream_rpc_method_handler(
                self.list_job,
                request_deserializer=ListJobRequest.FromString,
                response_serializer=ListJobRequest.SerializeToString,
            ),
            "ListJobSet": grpc.unary_stream_rpc_method_handler(
                self.list_job_set,
                request_deserializer=ListJobSetRequest.FromString,
                response_serializer=ListJobSetRequest.SerializeToString,
            ),
            "SubscribeJob": grpc.unary_stream_rpc_method_handler(
                self.subscribe_job,
                request_deserializer=SubscribeJobRequest.FromString,
                response_serializer=SubscribeJobRequest.SerializeToString,
            ),
            "DeleteJob": grpc.unary_unary_rpc_method_handler(
                self.delete_job,
                request_deserializer=DeleteJobRequest.FromString,
                response_serializer=DeleteJobRequest.SerializeToString,
            ),
            "StopJob": grpc.unary_unary_rpc_method_handler(
                self.stop_job,
                request_deserializer=StopJobRequest.FromString,
                response_serializer=StopJobRequest.SerializeToString,
            ),
            "InspectDatum": grpc.unary_unary_rpc_method_handler(
                self.inspect_datum,
                request_deserializer=InspectDatumRequest.FromString,
                response_serializer=InspectDatumRequest.SerializeToString,
            ),
            "ListDatum": grpc.unary_stream_rpc_method_handler(
                self.list_datum,
                request_deserializer=ListDatumRequest.FromString,
                response_serializer=ListDatumRequest.SerializeToString,
            ),
            "CreateDatum": grpc.stream_stream_rpc_method_handler(
                self.create_datum,
                request_deserializer=CreateDatumRequest.FromString,
                response_serializer=CreateDatumRequest.SerializeToString,
            ),
            "RestartDatum": grpc.unary_unary_rpc_method_handler(
                self.restart_datum,
                request_deserializer=RestartDatumRequest.FromString,
                response_serializer=RestartDatumRequest.SerializeToString,
            ),
            "RerunPipeline": grpc.unary_unary_rpc_method_handler(
                self.rerun_pipeline,
                request_deserializer=RerunPipelineRequest.FromString,
                response_serializer=RerunPipelineRequest.SerializeToString,
            ),
            "CreatePipeline": grpc.unary_unary_rpc_method_handler(
                self.create_pipeline,
                request_deserializer=CreatePipelineRequest.FromString,
                response_serializer=CreatePipelineRequest.SerializeToString,
            ),
            "CreatePipelineV2": grpc.unary_unary_rpc_method_handler(
                self.create_pipeline_v2,
                request_deserializer=CreatePipelineV2Request.FromString,
                response_serializer=CreatePipelineV2Request.SerializeToString,
            ),
            "InspectPipeline": grpc.unary_unary_rpc_method_handler(
                self.inspect_pipeline,
                request_deserializer=InspectPipelineRequest.FromString,
                response_serializer=InspectPipelineRequest.SerializeToString,
            ),
            "ListPipeline": grpc.unary_stream_rpc_method_handler(
                self.list_pipeline,
                request_deserializer=ListPipelineRequest.FromString,
                response_serializer=ListPipelineRequest.SerializeToString,
            ),
            "DeletePipeline": grpc.unary_unary_rpc_method_handler(
                self.delete_pipeline,
                request_deserializer=DeletePipelineRequest.FromString,
                response_serializer=DeletePipelineRequest.SerializeToString,
            ),
            "DeletePipelines": grpc.unary_unary_rpc_method_handler(
                self.delete_pipelines,
                request_deserializer=DeletePipelinesRequest.FromString,
                response_serializer=DeletePipelinesRequest.SerializeToString,
            ),
            "StartPipeline": grpc.unary_unary_rpc_method_handler(
                self.start_pipeline,
                request_deserializer=StartPipelineRequest.FromString,
                response_serializer=StartPipelineRequest.SerializeToString,
            ),
            "StopPipeline": grpc.unary_unary_rpc_method_handler(
                self.stop_pipeline,
                request_deserializer=StopPipelineRequest.FromString,
                response_serializer=StopPipelineRequest.SerializeToString,
            ),
            "RunPipeline": grpc.unary_unary_rpc_method_handler(
                self.run_pipeline,
                request_deserializer=RunPipelineRequest.FromString,
                response_serializer=RunPipelineRequest.SerializeToString,
            ),
            "RunCron": grpc.unary_unary_rpc_method_handler(
                self.run_cron,
                request_deserializer=RunCronRequest.FromString,
                response_serializer=RunCronRequest.SerializeToString,
            ),
            "CheckStatus": grpc.unary_stream_rpc_method_handler(
                self.check_status,
                request_deserializer=CheckStatusRequest.FromString,
                response_serializer=CheckStatusRequest.SerializeToString,
            ),
            "CreateSecret": grpc.unary_unary_rpc_method_handler(
                self.create_secret,
                request_deserializer=CreateSecretRequest.FromString,
                response_serializer=CreateSecretRequest.SerializeToString,
            ),
            "DeleteSecret": grpc.unary_unary_rpc_method_handler(
                self.delete_secret,
                request_deserializer=DeleteSecretRequest.FromString,
                response_serializer=DeleteSecretRequest.SerializeToString,
            ),
            "ListSecret": grpc.unary_unary_rpc_method_handler(
                self.list_secret,
                request_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
                response_serializer=betterproto_lib_google_protobuf.Empty.SerializeToString,
            ),
            "InspectSecret": grpc.unary_unary_rpc_method_handler(
                self.inspect_secret,
                request_deserializer=InspectSecretRequest.FromString,
                response_serializer=InspectSecretRequest.SerializeToString,
            ),
            "DeleteAll": grpc.unary_unary_rpc_method_handler(
                self.delete_all,
                request_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
                response_serializer=betterproto_lib_google_protobuf.Empty.SerializeToString,
            ),
            "GetLogs": grpc.unary_stream_rpc_method_handler(
                self.get_logs,
                request_deserializer=GetLogsRequest.FromString,
                response_serializer=GetLogsRequest.SerializeToString,
            ),
            "ActivateAuth": grpc.unary_unary_rpc_method_handler(
                self.activate_auth,
                request_deserializer=ActivateAuthRequest.FromString,
                response_serializer=ActivateAuthRequest.SerializeToString,
            ),
            "UpdateJobState": grpc.unary_unary_rpc_method_handler(
                self.update_job_state,
                request_deserializer=UpdateJobStateRequest.FromString,
                response_serializer=UpdateJobStateRequest.SerializeToString,
            ),
            "RunLoadTest": grpc.unary_unary_rpc_method_handler(
                self.run_load_test,
                request_deserializer=RunLoadTestRequest.FromString,
                response_serializer=RunLoadTestRequest.SerializeToString,
            ),
            "RunLoadTestDefault": grpc.unary_unary_rpc_method_handler(
                self.run_load_test_default,
                request_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
                response_serializer=betterproto_lib_google_protobuf.Empty.SerializeToString,
            ),
            "RenderTemplate": grpc.unary_unary_rpc_method_handler(
                self.render_template,
                request_deserializer=RenderTemplateRequest.FromString,
                response_serializer=RenderTemplateRequest.SerializeToString,
            ),
            "ListTask": grpc.unary_stream_rpc_method_handler(
                self.list_task,
                request_deserializer=_taskapi__.ListTaskRequest.FromString,
                response_serializer=_taskapi__.ListTaskRequest.SerializeToString,
            ),
            "GetKubeEvents": grpc.unary_stream_rpc_method_handler(
                self.get_kube_events,
                request_deserializer=LokiRequest.FromString,
                response_serializer=LokiRequest.SerializeToString,
            ),
            "QueryLoki": grpc.unary_stream_rpc_method_handler(
                self.query_loki,
                request_deserializer=LokiRequest.FromString,
                response_serializer=LokiRequest.SerializeToString,
            ),
            "GetClusterDefaults": grpc.unary_unary_rpc_method_handler(
                self.get_cluster_defaults,
                request_deserializer=GetClusterDefaultsRequest.FromString,
                response_serializer=GetClusterDefaultsRequest.SerializeToString,
            ),
            "SetClusterDefaults": grpc.unary_unary_rpc_method_handler(
                self.set_cluster_defaults,
                request_deserializer=SetClusterDefaultsRequest.FromString,
                response_serializer=SetClusterDefaultsRequest.SerializeToString,
            ),
            "GetProjectDefaults": grpc.unary_unary_rpc_method_handler(
                self.get_project_defaults,
                request_deserializer=GetProjectDefaultsRequest.FromString,
                response_serializer=GetProjectDefaultsRequest.SerializeToString,
            ),
            "SetProjectDefaults": grpc.unary_unary_rpc_method_handler(
                self.set_project_defaults,
                request_deserializer=SetProjectDefaultsRequest.FromString,
                response_serializer=SetProjectDefaultsRequest.SerializeToString,
            ),
        }
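
ApiBase is the servicer-side counterpart of the stub: each method raises UNIMPLEMENTED by default, and __rpc_methods__ maps every RPC name under __proto_path__ ("pps_v2.API") to a grpc.RpcMethodHandler bound to the matching method, which is the shape grpc.method_handlers_generic_handler expects. A small introspective sketch follows; it is editorial, and note that grpc invokes each handler as behavior(request, context), so a concrete subclass must accept the request message rather than the unpacked keyword fields documented above.

import grpc

from pachyderm_sdk.api.pps import ApiBase

servicer = ApiBase()
handlers = servicer.__rpc_methods__

print(servicer.__proto_path__)   # pps_v2.API
print(len(handlers))             # one entry per RPC, e.g. "InspectJob"

# Each value is a grpc.RpcMethodHandler; InspectJob is unary-unary.
h = handlers["InspectJob"]
print(h.request_streaming, h.response_streaming)  # False False

# The whole table can be mounted on a grpc.Server via a generic handler.
generic = grpc.method_handlers_generic_handler(servicer.__proto_path__, handlers)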

Sub-modules

pachyderm_sdk.api.pps.extension

Handwritten classes/methods that augment the existing PPS API.

Classes

class JobState (*args, **kwds)

The base class for protobuf enumerations; all generated enumerations inherit from it. Bases: enum.IntEnum.

Expand source code
class JobState(betterproto.Enum):
    JOB_STATE_UNKNOWN = 0
    JOB_CREATED = 1
    JOB_STARTING = 2
    JOB_RUNNING = 3
    JOB_FAILURE = 4
    JOB_SUCCESS = 5
    JOB_KILLED = 6
    JOB_EGRESSING = 7
    JOB_FINISHING = 8
    JOB_UNRUNNABLE = 9

Ancestors

  • betterproto.Enum
  • enum.IntEnum
  • builtins.int
  • enum.ReprEnum
  • enum.Enum

Class variables

var JOB_STATE_UNKNOWN
var JOB_CREATED
var JOB_STARTING
var JOB_RUNNING
var JOB_FAILURE
var JOB_SUCCESS
var JOB_KILLED
var JOB_EGRESSING
var JOB_FINISHING
var JOB_UNRUNNABLE
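
JobState is an IntEnum (see the ancestors above), so members compare equal to their integer wire values and expose a .name. A short illustrative check; the grouping of terminal states is an editorial assumption, not part of the generated API:

from pachyderm_sdk.api.pps import JobState

state = JobState.JOB_RUNNING
assert state == 3                      # IntEnum: equal to its wire value
assert state.name == "JOB_RUNNING"

# Assumed set of states a job cannot leave.
TERMINAL = {
    JobState.JOB_FAILURE,
    JobState.JOB_SUCCESS,
    JobState.JOB_KILLED,
    JobState.JOB_UNRUNNABLE,
}
print(state in TERMINAL)  # False: the job is still running
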
class DatumState (*args, **kwds)

The base class for protobuf enumerations; all generated enumerations inherit from it. Bases: enum.IntEnum.

Expand source code
class DatumState(betterproto.Enum):
    UNKNOWN = 0
    FAILED = 1
    SUCCESS = 2
    SKIPPED = 3
    STARTING = 4
    RECOVERED = 5

Ancestors

  • betterproto.Enum
  • enum.IntEnum
  • builtins.int
  • enum.ReprEnum
  • enum.Enum

Class variables

var UNKNOWN
var FAILED
var SUCCESS
var SKIPPED
var STARTING
var RECOVERED
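
DatumInfo.state carries one of these values, so datum outcomes for a job can be tallied from the stub's list_datum stream. A hedged sketch; the endpoint, pipeline name, and job id are illustrative:

from collections import Counter

import grpc

from pachyderm_sdk.api.pps import ApiStub, DatumState, Job, Pipeline

stub = ApiStub(grpc.insecure_channel("localhost:30650"))  # assumed endpoint
job = Job(pipeline=Pipeline(name="edges"), id="0123456789ab")

# Tally each datum's state name, e.g. Counter({'SUCCESS': 10, 'SKIPPED': 2}).
counts = Counter(DatumState(info.state).name for info in stub.list_datum(job=job))
print(counts)
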
class WorkerState (*args, **kwds)

The base class for protobuf enumerations; all generated enumerations inherit from it. Bases: enum.IntEnum.

Expand source code
class WorkerState(betterproto.Enum):
    WORKER_STATE_UNKNOWN = 0
    POD_RUNNING = 1
    POD_SUCCESS = 2
    POD_FAILED = 3

Ancestors

  • betterproto.Enum
  • enum.IntEnum
  • builtins.int
  • enum.ReprEnum
  • enum.Enum

Class variables

var WORKER_STATE_UNKNOWN
var POD_RUNNING
var POD_SUCCESS
var POD_FAILED
class PipelineState (*args, **kwds)

The base class for protobuf enumerations; all generated enumerations inherit from it. Bases: enum.IntEnum.

Expand source code
class PipelineState(betterproto.Enum):
    PIPELINE_STATE_UNKNOWN = 0
    PIPELINE_STARTING = 1
    """
    There is a PipelineInfo + spec commit, but no RC This happens when a
    pipeline has been created but not yet picked up by a PPS server.
    """

    PIPELINE_RUNNING = 2
    """
    A pipeline has a spec commit and a service + RC This is the normal state of
    a pipeline.
    """

    PIPELINE_RESTARTING = 3
    """
    Equivalent to STARTING (there is a PipelineInfo + commit, but no RC) After
    some error caused runPipeline to exit, but before the pipeline is re-run.
    This is when the exponential backoff is in effect.
    """

    PIPELINE_FAILURE = 4
    """
    The pipeline has encountered unrecoverable errors and is no longer being
    retried. It won't leave this state until the pipeline is updated.
    """

    PIPELINE_PAUSED = 5
    """
    The pipeline has been explicitly paused by the user (the pipeline spec's
    Stopped field should be true if the pipeline is in this state)
    """

    PIPELINE_STANDBY = 6
    """
    The pipeline is fully functional, but there are no commits to process.
    """

    PIPELINE_CRASHING = 7
    """
    The pipeline's workers are crashing, or failing to come up, this may
    resolve itself, the pipeline may make progress while in this state if the
    problem is only being experienced by some workers.
    """

Ancestors

  • betterproto.Enum
  • enum.IntEnum
  • builtins.int
  • enum.ReprEnum
  • enum.Enum

Class variables

var PIPELINE_STATE_UNKNOWN
var PIPELINE_STARTING

There is a PipelineInfo + spec commit, but no RC. This happens when a pipeline has been created but not yet picked up by a PPS server.

var PIPELINE_RUNNING

A pipeline has a spec commit and a service + RC. This is the normal state of a pipeline.

var PIPELINE_RESTARTING

Equivalent to STARTING (there is a PipelineInfo + commit, but no RC). Entered after some error caused runPipeline to exit, but before the pipeline is re-run; this is when the exponential backoff is in effect.

var PIPELINE_FAILURE

The pipeline has encountered unrecoverable errors and is no longer being retried. It won't leave this state until the pipeline is updated.

var PIPELINE_PAUSED

The pipeline has been explicitly paused by the user (the pipeline spec's Stopped field should be true if the pipeline is in this state).

var PIPELINE_STANDBY

The pipeline is fully functional, but there are no commits to process.

var PIPELINE_CRASHING

The pipeline's workers are crashing or failing to come up. This may resolve itself, and the pipeline may make progress while in this state if the problem only affects some workers.
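
Several of these states (STARTING, RESTARTING, CRASHING) are transitional, so callers commonly poll inspect_pipeline until a pipeline settles. A hedged sketch; the endpoint and pipeline name are illustrative, and the choice of which states count as transitional is editorial:

import time

import grpc

from pachyderm_sdk.api.pps import ApiStub, Pipeline, PipelineState

stub = ApiStub(grpc.insecure_channel("localhost:30650"))  # assumed endpoint

TRANSITIONAL = {
    PipelineState.PIPELINE_STARTING,
    PipelineState.PIPELINE_RESTARTING,
    PipelineState.PIPELINE_CRASHING,
}
# Poll until the pipeline leaves the transitional states.
while True:
    info = stub.inspect_pipeline(pipeline=Pipeline(name="edges"))
    if info.state not in TRANSITIONAL:
        break
    time.sleep(1)
print(PipelineState(info.state).name)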

class TolerationOperator (*args, **kwds)

TolerationOperator relates a Toleration's key to its value.

Expand source code
class TolerationOperator(betterproto.Enum):
    """TolerationOperator relates a Toleration's key to its value."""

    EMPTY = 0
    EXISTS = 1
    EQUAL = 2

Ancestors

  • betterproto.Enum
  • enum.IntEnum
  • builtins.int
  • enum.ReprEnum
  • enum.Enum

Class variables

var EMPTY
var EXISTS
var EQUAL
class TaintEffect (*args, **kwds)

TaintEffect is an effect that can be matched by a toleration.

Expand source code
class TaintEffect(betterproto.Enum):
    """TaintEffect is an effect that can be matched by a toleration."""

    ALL_EFFECTS = 0
    NO_SCHEDULE = 1
    PREFER_NO_SCHEDULE = 2
    NO_EXECUTE = 3

Ancestors

  • betterproto.Enum
  • enum.IntEnum
  • builtins.int
  • enum.ReprEnum
  • enum.Enum

Class variables

var ALL_EFFECTS
var NO_SCHEDULE
var PREFER_NO_SCHEDULE
var NO_EXECUTE
class PipelineInfoPipelineType (*args, **kwds)

The pipeline type is stored here so that we can internally know the type of the pipeline without loading the spec from PFS.

Expand source code
class PipelineInfoPipelineType(betterproto.Enum):
    """
    The pipeline type is stored here so that we can internally know the type of
    the pipeline without loading the spec from PFS.
    """

    PIPELINT_TYPE_UNKNOWN = 0
    PIPELINE_TYPE_TRANSFORM = 1
    PIPELINE_TYPE_SPOUT = 2
    PIPELINE_TYPE_SERVICE = 3

Ancestors

  • betterproto.Enum
  • enum.IntEnum
  • builtins.int
  • enum.ReprEnum
  • enum.Enum

Class variables

var PIPELINT_TYPE_UNKNOWN
var PIPELINE_TYPE_TRANSFORM
var PIPELINE_TYPE_SPOUT
var PIPELINE_TYPE_SERVICE
class SecretMount (name: str = None, key: str = None, mount_path: str = None, env_var: str = None)

SecretMount(name: str = None, key: str = None, mount_path: str = None, env_var: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class SecretMount(betterproto.Message):
    name: str = betterproto.string_field(1)
    """Name must be the name of the secret in kubernetes."""

    key: str = betterproto.string_field(2)
    """
    Key of the secret to load into env_var, this field only has meaning if
    EnvVar != "".
    """

    mount_path: str = betterproto.string_field(3)
    env_var: str = betterproto.string_field(4)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var name : str

Name must be the name of the secret in kubernetes.

var key : str

Key of the secret to load into env_var, this field only has meaning if EnvVar != "".

var mount_path : str
var env_var : str
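
Per the field docstrings, a SecretMount is used in one of two ways: mount the whole Kubernetes secret as files under mount_path, or surface a single key of it as an environment variable. An illustrative construction; the secret and key names are assumptions:

from pachyderm_sdk.api.pps import SecretMount

# Mount every key of the Kubernetes secret as files under /secrets.
as_files = SecretMount(name="my-k8s-secret", mount_path="/secrets")

# Expose one key of the secret as the API_TOKEN environment variable.
as_env = SecretMount(name="my-k8s-secret", key="api-token", env_var="API_TOKEN")
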
class Transform (image: str = None, cmd: List[str] = None, err_cmd: List[str] = None, env: Dict[str, str] = None, secrets: List[ForwardRef('SecretMount')] = None, image_pull_secrets: List[str] = None, stdin: List[str] = None, err_stdin: List[str] = None, accept_return_code: List[int] = None, debug: bool = None, user: str = None, working_dir: str = None, dockerfile: str = None, memory_volume: bool = None, datum_batching: bool = None)

Transform(image: str = None, cmd: List[str] = None, err_cmd: List[str] = None, env: Dict[str, str] = None, secrets: List[ForwardRef('SecretMount')] = None, image_pull_secrets: List[str] = None, stdin: List[str] = None, err_stdin: List[str] = None, accept_return_code: List[int] = None, debug: bool = None, user: str = None, working_dir: str = None, dockerfile: str = None, memory_volume: bool = None, datum_batching: bool = None)

Expand source code
@dataclass(eq=False, repr=False)
class Transform(betterproto.Message):
    image: str = betterproto.string_field(1)
    cmd: List[str] = betterproto.string_field(2)
    err_cmd: List[str] = betterproto.string_field(3)
    env: Dict[str, str] = betterproto.map_field(
        4, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    )
    secrets: List["SecretMount"] = betterproto.message_field(5)
    image_pull_secrets: List[str] = betterproto.string_field(6)
    stdin: List[str] = betterproto.string_field(7)
    err_stdin: List[str] = betterproto.string_field(8)
    accept_return_code: List[int] = betterproto.int64_field(9)
    debug: bool = betterproto.bool_field(10)
    user: str = betterproto.string_field(11)
    working_dir: str = betterproto.string_field(12)
    dockerfile: str = betterproto.string_field(13)
    memory_volume: bool = betterproto.bool_field(14)
    datum_batching: bool = betterproto.bool_field(15)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var image : str
var cmd : List[str]
var err_cmd : List[str]
var env : Dict[str, str]
var secrets : List[SecretMount]
var image_pull_secrets : List[str]
var stdin : List[str]
var err_stdin : List[str]
var accept_return_code : List[int]
var debug : bool
var user : str
var working_dir : str
var dockerfile : str
var memory_volume : bool
var datum_batching : bool
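
Transform is the heart of a pipeline spec: the container image plus the command run over each datum. An illustrative construction; the image, command, env, and secret values are assumptions:

from pachyderm_sdk.api.pps import SecretMount, Transform

transform = Transform(
    image="pachyderm/opencv:1.0",     # assumed image
    cmd=["python3", "/edges.py"],     # entrypoint run for each datum
    env={"LOG_LEVEL": "debug"},
    secrets=[SecretMount(name="my-k8s-secret", mount_path="/secrets")],
)
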
class TfJob (tf_job: str = None)

TfJob(tf_job: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class TfJob(betterproto.Message):
    tf_job: str = betterproto.string_field(1)
    """
    tf_job  is a serialized Kubeflow TFJob spec. Pachyderm sends this directly
    to a kubernetes cluster on which kubeflow has been installed, instead of
    creating a pipeline ReplicationController as it normally would.
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var tf_job : str

tf_job is a serialized Kubeflow TFJob spec. Pachyderm sends this directly to a kubernetes cluster on which kubeflow has been installed, instead of creating a pipeline ReplicationController as it normally would.

class Egress (url: str = None, object_storage: _pfs__.ObjectStorageEgress = None, sql_database: _pfs__.SqlDatabaseEgress = None)

Egress(url: str = None, object_storage: '_pfs__.ObjectStorageEgress' = None, sql_database: '_pfs__.SqlDatabaseEgress' = None)

Expand source code
@dataclass(eq=False, repr=False)
class Egress(betterproto.Message):
    url: str = betterproto.string_field(1)
    object_storage: "_pfs__.ObjectStorageEgress" = betterproto.message_field(
        2, group="target"
    )
    sql_database: "_pfs__.SqlDatabaseEgress" = betterproto.message_field(
        3, group="target"
    )

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var url : str
var object_storage : ObjectStorageEgress
var sql_database : SqlDatabaseEgress
class Determined (workspaces: List[str] = None)

Determined(workspaces: List[str] = None)

Expand source code
@dataclass(eq=False, repr=False)
class Determined(betterproto.Message):
    workspaces: List[str] = betterproto.string_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var workspaces : List[str]
class Job (pipeline: Pipeline = None, id: str = None)

Job(pipeline: 'Pipeline' = None, id: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class Job(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    id: str = betterproto.string_field(2)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline
var id : str
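
A Job is just a (pipeline, id) reference, which is the shape stub methods such as inspect_job expect. A hedged sketch; the endpoint, pipeline name, and job id are illustrative:

import grpc

from pachyderm_sdk.api.pps import ApiStub, Job, Pipeline

stub = ApiStub(grpc.insecure_channel("localhost:30650"))  # assumed endpoint
job = Job(pipeline=Pipeline(name="edges"), id="0123456789ab")

# wait=True blocks until the job finishes; details=True includes full info.
info = stub.inspect_job(job=job, wait=True, details=True)
print(info.state)
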
class Metadata (annotations: Dict[str, str] = None, labels: Dict[str, str] = None)

Metadata(annotations: Dict[str, str] = None, labels: Dict[str, str] = None)

Expand source code
@dataclass(eq=False, repr=False)
class Metadata(betterproto.Message):
    annotations: Dict[str, str] = betterproto.map_field(
        1, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    )
    labels: Dict[str, str] = betterproto.map_field(
        2, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    )

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var annotations : Dict[str, str]
var labels : Dict[str, str]
class Service (internal_port: int = None, external_port: int = None, ip: str = None, type: str = None)

Service(internal_port: int = None, external_port: int = None, ip: str = None, type: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class Service(betterproto.Message):
    internal_port: int = betterproto.int32_field(1)
    external_port: int = betterproto.int32_field(2)
    ip: str = betterproto.string_field(3)
    type: str = betterproto.string_field(4)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var internal_port : int
var external_port : int
var ip : str
var type : str
class Spout (service: Service = None)

Spout(service: 'Service' = None)

Expand source code
@dataclass(eq=False, repr=False)
class Spout(betterproto.Message):
    service: "Service" = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var service : Service
class PfsInput (project: str = None, name: str = None, repo: str = None, repo_type: str = None, branch: str = None, commit: str = None, glob: str = None, join_on: str = None, outer_join: bool = None, group_by: str = None, lazy: bool = None, empty_files: bool = None, s3: bool = None, trigger: _pfs__.Trigger = None)

PfsInput(project: str = None, name: str = None, repo: str = None, repo_type: str = None, branch: str = None, commit: str = None, glob: str = None, join_on: str = None, outer_join: bool = None, group_by: str = None, lazy: bool = None, empty_files: bool = None, s3: bool = None, trigger: '_pfs__.Trigger' = None)

Expand source code
@dataclass(eq=False, repr=False)
class PfsInput(betterproto.Message):
    project: str = betterproto.string_field(14)
    name: str = betterproto.string_field(1)
    repo: str = betterproto.string_field(2)
    repo_type: str = betterproto.string_field(13)
    branch: str = betterproto.string_field(3)
    commit: str = betterproto.string_field(4)
    glob: str = betterproto.string_field(5)
    join_on: str = betterproto.string_field(6)
    outer_join: bool = betterproto.bool_field(7)
    group_by: str = betterproto.string_field(8)
    lazy: bool = betterproto.bool_field(9)
    empty_files: bool = betterproto.bool_field(10)
    """
    EmptyFiles, if true, will cause files from this PFS input to be presented
    as empty files. This is useful in shuffle pipelines where you want to read
    the names of files and reorganize them using symlinks.
    """

    s3: bool = betterproto.bool_field(11)
    """
    S3, if true, will cause the worker to NOT download or link files from this
    input into the /pfs directory. Instead, an instance of our S3 gateway
    service will run on each of the sidecars, and data can be retrieved from
    this input by querying http://<pipeline>-s3.<namespace>/<job
    id>.<input>/my/file
    """

    trigger: "_pfs__.Trigger" = betterproto.message_field(12)
    """
    Trigger defines when this input is processed by the pipeline, if it's nil
    the input is processed anytime something is committed to the input branch.
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var project : str
var name : str
var repo : str
var repo_type : str
var branch : str
var commit : str
var glob : str
var join_on : str
var outer_join : bool
var group_by : str
var lazy : bool
var empty_files : bool

EmptyFiles, if true, will cause files from this PFS input to be presented as empty files. This is useful in shuffle pipelines where you want to read the names of files and reorganize them using symlinks.

var s3 : bool

S3, if true, will cause the worker to NOT download or link files from this input into the /pfs directory. Instead, an instance of our S3 gateway service will run on each of the sidecars, and data can be retrieved from this input by querying http://&lt;pipeline&gt;-s3.&lt;namespace&gt;/&lt;job id&gt;.&lt;input&gt;/my/file

var trigger : Trigger

Trigger defines when this input is processed by the pipeline, if it's nil the input is processed anytime something is committed to the input branch.
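
A minimal sketch of a PfsInput, using placeholder project, repo, and glob values:

from pachyderm_sdk.api.pps import PfsInput

# One datum per top-level path in a hypothetical 'images' repo; 'lazy'
# defers file downloads until the worker actually reads a file.
pfs_input = PfsInput(
    project="default",  # illustrative project name
    repo="images",      # illustrative repo name
    branch="master",
    glob="/*",          # one datum per top-level path
    lazy=True,
)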

class CronInput (name: str = None, project: str = None, repo: str = None, commit: str = None, spec: str = None, overwrite: bool = None, start: datetime.datetime = None)

CronInput(name: str = None, project: str = None, repo: str = None, commit: str = None, spec: str = None, overwrite: bool = None, start: datetime.datetime = None)

Expand source code
@dataclass(eq=False, repr=False)
class CronInput(betterproto.Message):
    name: str = betterproto.string_field(1)
    project: str = betterproto.string_field(7)
    repo: str = betterproto.string_field(2)
    commit: str = betterproto.string_field(3)
    spec: str = betterproto.string_field(4)
    overwrite: bool = betterproto.bool_field(5)
    """
    Overwrite, if true, will expose a single datum that gets overwritten each
    tick. If false, it will create a new datum for each tick.
    """

    start: datetime = betterproto.message_field(6)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var name : str
var project : str
var repo : str
var commit : str
var spec : str
var overwrite : bool

Overwrite, if true, will expose a single datum that gets overwritten each tick. If false, it will create a new datum for each tick.

var start : datetime.datetime
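
A minimal sketch of a CronInput; the name, repo, and cron spec below are placeholders:

from pachyderm_sdk.api.pps import CronInput

# A hypothetical hourly tick. With overwrite=True the single datum is
# overwritten on each tick instead of a new datum accumulating per tick.
cron = CronInput(
    name="tick",       # illustrative input name
    repo="tick",       # illustrative repo backing the cron input
    spec="@every 1h",  # cron spec string
    overwrite=True,
)
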
class Input (pfs: PfsInput = None, join: List[ForwardRef('Input')] = None, group: List[ForwardRef('Input')] = None, cross: List[ForwardRef('Input')] = None, union: List[ForwardRef('Input')] = None, cron: CronInput = None)

Input(pfs: 'PfsInput' = None, join: List[ForwardRef('Input')] = None, group: List[ForwardRef('Input')] = None, cross: List[ForwardRef('Input')] = None, union: List[ForwardRef('Input')] = None, cron: 'CronInput' = None)

Expand source code
@dataclass(eq=False, repr=False)
class Input(betterproto.Message):
    pfs: "PfsInput" = betterproto.message_field(1)
    join: List["Input"] = betterproto.message_field(2)
    group: List["Input"] = betterproto.message_field(3)
    cross: List["Input"] = betterproto.message_field(4)
    union: List["Input"] = betterproto.message_field(5)
    cron: "CronInput" = betterproto.message_field(6)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pfs : PfsInput
var join : List[Input]
var group : List[Input]
var cross : List[Input]
var union : List[Input]
var cron : CronInput
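
A minimal sketch of composing Input messages, assuming the usual convention that only one field (pfs, join, group, cross, union, or cron) is set per Input node; the repos and globs are placeholders:

from pachyderm_sdk.api.pps import Input, PfsInput

# A cross product: every datum from 'images' is paired with the single
# datum ("/") from 'models'.
cross_input = Input(
    cross=[
        Input(pfs=PfsInput(repo="images", glob="/*")),  # illustrative repo
        Input(pfs=PfsInput(repo="models", glob="/")),   # illustrative repo
    ]
)
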
class JobInput (name: str = None, commit: _pfs__.Commit = None, glob: str = None, lazy: bool = None)

JobInput(name: str = None, commit: '_pfs__.Commit' = None, glob: str = None, lazy: bool = None)

Expand source code
@dataclass(eq=False, repr=False)
class JobInput(betterproto.Message):
    name: str = betterproto.string_field(1)
    commit: "_pfs__.Commit" = betterproto.message_field(2)
    glob: str = betterproto.string_field(3)
    lazy: bool = betterproto.bool_field(4)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var name : str
var commit : Commit
var glob : str
var lazy : bool
class ParallelismSpec (constant: int = None)

ParallelismSpec(constant: int = None)

Expand source code
@dataclass(eq=False, repr=False)
class ParallelismSpec(betterproto.Message):
    constant: int = betterproto.uint64_field(1)
    """
    Starts the pipeline/job with 'constant' workers, unless 'constant' is
    zero. If 'constant' is zero (which is the zero value of ParallelismSpec),
    then Pachyderm will choose the number of workers that is started
    (currently it chooses the number of workers in the cluster).
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var constant : int

Starts the pipeline/job with 'constant' workers, unless 'constant' is zero. If 'constant' is zero (which is the zero value of ParallelismSpec), then Pachyderm will choose the number of workers that is started (currently it chooses the number of workers in the cluster).
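
A minimal sketch of the semantics described above:

from pachyderm_sdk.api.pps import ParallelismSpec

fixed = ParallelismSpec(constant=4)  # request exactly four workers
auto = ParallelismSpec()             # constant == 0: Pachyderm picks the count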

class InputFile (path: str = None, hash: bytes = None)

InputFile(path: str = None, hash: bytes = None)

Expand source code
@dataclass(eq=False, repr=False)
class InputFile(betterproto.Message):
    path: str = betterproto.string_field(1)
    """This file's absolute path within its pfs repo."""

    hash: bytes = betterproto.bytes_field(2)
    """This file's hash"""

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var path : str

This file's absolute path within its pfs repo.

var hash : bytes

This file's hash

class Datum (job: Job = None, id: str = None)

Datum(job: 'Job' = None, id: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class Datum(betterproto.Message):
    job: "Job" = betterproto.message_field(1)

    id: str = betterproto.string_field(2)
    """ID is the hash computed from all the files"""

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var job : Job

var id : str

ID is the hash computed from all the files
class DatumInfo (datum: Datum = None, state: DatumState = None, stats: ProcessStats = None, pfs_state: _pfs__.File = None, data: List[ForwardRef('_pfs__.FileInfo')] = None, image_id: str = None)

DatumInfo(datum: 'Datum' = None, state: 'DatumState' = None, stats: 'ProcessStats' = None, pfs_state: '_pfs__.File' = None, data: List[ForwardRef('_pfs__.FileInfo')] = None, image_id: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class DatumInfo(betterproto.Message):
    datum: "Datum" = betterproto.message_field(1)
    state: "DatumState" = betterproto.enum_field(2)
    stats: "ProcessStats" = betterproto.message_field(3)
    pfs_state: "_pfs__.File" = betterproto.message_field(4)
    data: List["_pfs__.FileInfo"] = betterproto.message_field(5)
    image_id: str = betterproto.string_field(6)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var datum : Datum
var state : DatumState
var stats : ProcessStats
var pfs_state : File
var data : List[FileInfo]
var image_id : str
class Aggregate (count: int = None, mean: float = None, stddev: float = None, fifth_percentile: float = None, ninety_fifth_percentile: float = None)

Aggregate(count: int = None, mean: float = None, stddev: float = None, fifth_percentile: float = None, ninety_fifth_percentile: float = None)

Expand source code
@dataclass(eq=False, repr=False)
class Aggregate(betterproto.Message):
    count: int = betterproto.int64_field(1)
    mean: float = betterproto.double_field(2)
    stddev: float = betterproto.double_field(3)
    fifth_percentile: float = betterproto.double_field(4)
    ninety_fifth_percentile: float = betterproto.double_field(5)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var count : int
var mean : float
var stddev : float
var fifth_percentile : float
var ninety_fifth_percentile : float
class ProcessStats (download_time: datetime.timedelta = None, process_time: datetime.timedelta = None, upload_time: datetime.timedelta = None, download_bytes: int = None, upload_bytes: int = None)

ProcessStats(download_time: datetime.timedelta = None, process_time: datetime.timedelta = None, upload_time: datetime.timedelta = None, download_bytes: int = None, upload_bytes: int = None)

Expand source code
@dataclass(eq=False, repr=False)
class ProcessStats(betterproto.Message):
    download_time: timedelta = betterproto.message_field(1)
    process_time: timedelta = betterproto.message_field(2)
    upload_time: timedelta = betterproto.message_field(3)
    download_bytes: int = betterproto.int64_field(4)
    upload_bytes: int = betterproto.int64_field(5)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var download_time : datetime.timedelta
var process_time : datetime.timedelta
var upload_time : datetime.timedelta
var download_bytes : int
var upload_bytes : int
class AggregateProcessStats (download_time: Aggregate = None, process_time: Aggregate = None, upload_time: Aggregate = None, download_bytes: Aggregate = None, upload_bytes: Aggregate = None)

AggregateProcessStats(download_time: 'Aggregate' = None, process_time: 'Aggregate' = None, upload_time: 'Aggregate' = None, download_bytes: 'Aggregate' = None, upload_bytes: 'Aggregate' = None)

Expand source code
@dataclass(eq=False, repr=False)
class AggregateProcessStats(betterproto.Message):
    download_time: "Aggregate" = betterproto.message_field(1)
    process_time: "Aggregate" = betterproto.message_field(2)
    upload_time: "Aggregate" = betterproto.message_field(3)
    download_bytes: "Aggregate" = betterproto.message_field(4)
    upload_bytes: "Aggregate" = betterproto.message_field(5)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var download_time : Aggregate
var process_time : Aggregate
var upload_time : Aggregate
var download_bytes : Aggregate
var upload_bytes : Aggregate
class WorkerStatus (worker_id: str = None, job_id: str = None, datum_status: DatumStatus = None)

WorkerStatus(worker_id: str = None, job_id: str = None, datum_status: 'DatumStatus' = None)

Expand source code
@dataclass(eq=False, repr=False)
class WorkerStatus(betterproto.Message):
    worker_id: str = betterproto.string_field(1)
    job_id: str = betterproto.string_field(2)
    datum_status: "DatumStatus" = betterproto.message_field(3)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var worker_id : str
var job_id : str
var datum_status : DatumStatus
class DatumStatus (started: datetime.datetime = None, data: List[ForwardRef('InputFile')] = None)

DatumStatus(started: datetime.datetime = None, data: List[ForwardRef('InputFile')] = None)

Expand source code
@dataclass(eq=False, repr=False)
class DatumStatus(betterproto.Message):
    started: datetime = betterproto.message_field(1)
    """Started is the time processing on the current datum began."""

    data: List["InputFile"] = betterproto.message_field(2)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var started : datetime.datetime

Started is the time processing on the current datum began.

var data : List[InputFile]
class ResourceSpec (cpu: float = None, memory: str = None, gpu: GpuSpec = None, disk: str = None)

ResourceSpec describes the amount of resources that pipeline pods should request from kubernetes, for scheduling.

Expand source code
@dataclass(eq=False, repr=False)
class ResourceSpec(betterproto.Message):
    """
    ResourceSpec describes the amount of resources that pipeline pods should
    request from kubernetes, for scheduling.
    """

    cpu: float = betterproto.float_field(1)
    """
    The number of CPUs each worker needs (partial values are allowed, and
    encouraged)
    """

    memory: str = betterproto.string_field(2)
    """
    The amount of memory each worker needs (in bytes, with allowed SI
    suffixes: M, K, G, Mi, Ki, Gi, etc.).
    """

    gpu: "GpuSpec" = betterproto.message_field(3)
    """The spec for GPU resources."""

    disk: str = betterproto.string_field(4)
    """
    The amount of ephemeral storage each worker needs (in bytes, with allowed
    SI suffixes: M, K, G, Mi, Ki, Gi, etc.).
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var cpu : float

The number of CPUs each worker needs (partial values are allowed, and encouraged)

var memory : str

The amount of memory each worker needs (in bytes, with allowed SI suffixes: M, K, G, Mi, Ki, Gi, etc.).

var gpu : GpuSpec

The spec for GPU resources.

var disk : str

The amount of ephemeral storage each worker needs (in bytes, with allowed SI suffixes: M, K, G, Mi, Ki, Gi, etc.).

class GpuSpec (type: str = None, number: int = None)

GpuSpec(type: str = None, number: int = None)

Expand source code
@dataclass(eq=False, repr=False)
class GpuSpec(betterproto.Message):
    type: str = betterproto.string_field(1)
    """The type of GPU (nvidia.com/gpu or amd.com/gpu for example)."""

    number: int = betterproto.int64_field(2)
    """The number of GPUs to request."""

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var type : str

The type of GPU (nvidia.com/gpu or amd.com/gpu for example).

var number : int

The number of GPUs to request.
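
A minimal sketch combining ResourceSpec and GpuSpec; the specific quantities are illustrative, not defaults:

from pachyderm_sdk.api.pps import GpuSpec, ResourceSpec

# Hypothetical per-worker request: half a CPU, 1GiB of memory, one NVIDIA
# GPU, and 10GiB of ephemeral storage.
resources = ResourceSpec(
    cpu=0.5,
    memory="1Gi",
    gpu=GpuSpec(type="nvidia.com/gpu", number=1),
    disk="10Gi",
)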

class JobSetInfo (job_set: JobSet = None, jobs: List[ForwardRef('JobInfo')] = None)

JobSetInfo(job_set: 'JobSet' = None, jobs: List[ForwardRef('JobInfo')] = None)

Expand source code
@dataclass(eq=False, repr=False)
class JobSetInfo(betterproto.Message):
    job_set: "JobSet" = betterproto.message_field(1)
    jobs: List["JobInfo"] = betterproto.message_field(2)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var job_set : JobSet
var jobs : List[JobInfo]
class JobInfo (job: Job = None, pipeline_version: int = None, output_commit: _pfs__.Commit = None, restart: int = None, data_processed: int = None, data_skipped: int = None, data_total: int = None, data_failed: int = None, data_recovered: int = None, stats: ProcessStats = None, state: JobState = None, reason: str = None, created: datetime.datetime = None, started: datetime.datetime = None, finished: datetime.datetime = None, details: JobInfoDetails = None, auth_token: str = None)

JobInfo is the data stored in the database regarding a given job. The 'details' field contains more information about the job which is expensive to fetch, requiring querying workers or loading the pipeline spec from object storage.

Expand source code
@dataclass(eq=False, repr=False)
class JobInfo(betterproto.Message):
    """
    JobInfo is the data stored in the database regarding a given job.  The
    'details' field contains more information about the job which is expensive
    to fetch, requiring querying workers or loading the pipeline spec from
    object storage.
    """

    job: "Job" = betterproto.message_field(1)
    pipeline_version: int = betterproto.uint64_field(2)
    output_commit: "_pfs__.Commit" = betterproto.message_field(3)
    restart: int = betterproto.uint64_field(4)
    """Job restart count (e.g. due to datum failure)"""

    data_processed: int = betterproto.int64_field(5)
    """Counts of how many times we processed or skipped a datum"""

    data_skipped: int = betterproto.int64_field(6)
    data_total: int = betterproto.int64_field(7)
    data_failed: int = betterproto.int64_field(8)
    data_recovered: int = betterproto.int64_field(9)
    stats: "ProcessStats" = betterproto.message_field(10)
    """Download/process/upload time and download/upload bytes"""

    state: "JobState" = betterproto.enum_field(11)
    reason: str = betterproto.string_field(12)
    created: datetime = betterproto.message_field(13)
    started: datetime = betterproto.message_field(14)
    finished: datetime = betterproto.message_field(15)
    details: "JobInfoDetails" = betterproto.message_field(16)
    auth_token: str = betterproto.string_field(17)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var job : Job
var pipeline_version : int
var output_commit : Commit
var restart : int

Job restart count (e.g. due to datum failure)

var data_processed : int

Counts of how many times we processed or skipped a datum

var data_skipped : int
var data_total : int
var data_failed : int
var data_recovered : int
var stats : ProcessStats

Download/process/upload time and download/upload bytes

var state : JobState
var reason : str
var created : datetime.datetime
var started : datetime.datetime
var finished : datetime.datetime
var details : JobInfoDetails
var auth_token : str
class JobInfoDetails (transform: Transform = None, parallelism_spec: ParallelismSpec = None, egress: Egress = None, service: Service = None, spout: Spout = None, worker_status: List[ForwardRef('WorkerStatus')] = None, resource_requests: ResourceSpec = None, resource_limits: ResourceSpec = None, sidecar_resource_limits: ResourceSpec = None, input: Input = None, salt: str = None, datum_set_spec: DatumSetSpec = None, datum_timeout: datetime.timedelta = None, job_timeout: datetime.timedelta = None, datum_tries: int = None, scheduling_spec: SchedulingSpec = None, pod_spec: str = None, pod_patch: str = None, sidecar_resource_requests: ResourceSpec = None)

JobInfoDetails(transform: 'Transform' = None, parallelism_spec: 'ParallelismSpec' = None, egress: 'Egress' = None, service: 'Service' = None, spout: 'Spout' = None, worker_status: List[ForwardRef('WorkerStatus')] = None, resource_requests: 'ResourceSpec' = None, resource_limits: 'ResourceSpec' = None, sidecar_resource_limits: 'ResourceSpec' = None, input: 'Input' = None, salt: str = None, datum_set_spec: 'DatumSetSpec' = None, datum_timeout: datetime.timedelta = None, job_timeout: datetime.timedelta = None, datum_tries: int = None, scheduling_spec: 'SchedulingSpec' = None, pod_spec: str = None, pod_patch: str = None, sidecar_resource_requests: 'ResourceSpec' = None)

Expand source code
@dataclass(eq=False, repr=False)
class JobInfoDetails(betterproto.Message):
    transform: "Transform" = betterproto.message_field(1)
    parallelism_spec: "ParallelismSpec" = betterproto.message_field(2)
    egress: "Egress" = betterproto.message_field(3)
    service: "Service" = betterproto.message_field(4)
    spout: "Spout" = betterproto.message_field(5)
    worker_status: List["WorkerStatus"] = betterproto.message_field(6)
    resource_requests: "ResourceSpec" = betterproto.message_field(7)
    resource_limits: "ResourceSpec" = betterproto.message_field(8)
    sidecar_resource_limits: "ResourceSpec" = betterproto.message_field(9)
    input: "Input" = betterproto.message_field(10)
    salt: str = betterproto.string_field(11)
    datum_set_spec: "DatumSetSpec" = betterproto.message_field(12)
    datum_timeout: timedelta = betterproto.message_field(13)
    job_timeout: timedelta = betterproto.message_field(14)
    datum_tries: int = betterproto.int64_field(15)
    scheduling_spec: "SchedulingSpec" = betterproto.message_field(16)
    pod_spec: str = betterproto.string_field(17)
    pod_patch: str = betterproto.string_field(18)
    sidecar_resource_requests: "ResourceSpec" = betterproto.message_field(19)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var transform : Transform
var parallelism_spec : ParallelismSpec
var egress : Egress
var service : Service
var spout : Spout
var worker_status : List[WorkerStatus]
var resource_requests : ResourceSpec
var resource_limits : ResourceSpec
var sidecar_resource_limits : ResourceSpec
var input : Input
var salt : str
var datum_set_spec : DatumSetSpec
var datum_timeout : datetime.timedelta
var job_timeout : datetime.timedelta
var datum_tries : int
var scheduling_spec : SchedulingSpec
var pod_spec : str
var pod_patch : str
var sidecar_resource_requests : ResourceSpec
class Worker (name: str = None, state: WorkerState = None)

Worker(name: str = None, state: 'WorkerState' = None)

Expand source code
@dataclass(eq=False, repr=False)
class Worker(betterproto.Message):
    name: str = betterproto.string_field(1)
    state: "WorkerState" = betterproto.enum_field(2)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var name : str
var state : WorkerState
class Pipeline (project: _pfs__.Project = None, name: str = None)

Pipeline(project: '_pfs__.Project' = None, name: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class Pipeline(betterproto.Message):
    project: "_pfs__.Project" = betterproto.message_field(2)
    name: str = betterproto.string_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var project : Project
var name : str
class Toleration (key: str = None, operator: TolerationOperator = None, value: str = None, effect: TaintEffect = None, toleration_seconds: Optional[int] = None)

Toleration is a Kubernetes toleration.

Expand source code
@dataclass(eq=False, repr=False)
class Toleration(betterproto.Message):
    """Toleration is a Kubernetes toleration."""

    key: str = betterproto.string_field(1)
    """
    key is the taint key that the toleration applies to.  Empty means match all
    taint keys.
    """

    operator: "TolerationOperator" = betterproto.enum_field(2)
    """operator represents a key's relationship to the value."""

    value: str = betterproto.string_field(3)
    """value is the taint value the toleration matches to."""

    effect: "TaintEffect" = betterproto.enum_field(4)
    """
    effect indicates the taint effect to match.  Empty means match all taint
    effects.
    """

    toleration_seconds: Optional[int] = betterproto.message_field(
        5, wraps=betterproto.TYPE_INT64
    )
    """
    toleration_seconds represents the period of time the toleration (which must
    be of effect NoExecute, otherwise this field is ignored) tolerates the
    taint.  If not set, tolerate the taint forever.
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var key : str

key is the taint key that the toleration applies to. Empty means match all taint keys.

var operator : TolerationOperator

operator represents a key's relationship to the value.

var value : str

value is the taint value the toleration matches to.

var effect : TaintEffect

effect indicates the taint effect to match. Empty means match all taint effects.

var toleration_seconds : Optional[int]

toleration_seconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. If not set, tolerate the taint forever.
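
A minimal sketch of a Toleration matching a hypothetical gpu=true taint:

from pachyderm_sdk.api.pps import TaintEffect, Toleration, TolerationOperator

# Tolerate the taint for up to an hour. Note that toleration_seconds only
# applies to the NO_EXECUTE effect and is ignored otherwise.
toleration = Toleration(
    key="gpu",         # illustrative taint key
    operator=TolerationOperator.EQUAL,
    value="true",      # illustrative taint value
    effect=TaintEffect.NO_EXECUTE,
    toleration_seconds=3600,
)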

class PipelineInfo (pipeline: Pipeline = None, version: int = None, spec_commit: _pfs__.Commit = None, stopped: bool = None, state: PipelineState = None, reason: str = None, last_job_state: JobState = None, parallelism: int = None, type: PipelineInfoPipelineType = None, auth_token: str = None, details: PipelineInfoDetails = None, user_spec_json: str = None, effective_spec_json: str = None)

PipelineInfo is the proto for each pipeline that Pachd stores in the database. It tracks the state of the pipeline, and points to its metadata in PFS (and, by pointing to a PFS commit, de facto tracks the pipeline's version). Any information about the pipeline not stored in the database is in the Details object, which requires fetching the spec from PFS or other potentially expensive operations.

Expand source code
@dataclass(eq=False, repr=False)
class PipelineInfo(betterproto.Message):
    """
    PipelineInfo is the proto for each pipeline that Pachd stores in the database.
    It tracks the state of the pipeline, and points to its metadata in PFS
    (and, by pointing to a PFS commit, de facto tracks the pipeline's version).
    Any information about the pipeline _not_ stored in the database is in the
    Details object, which requires fetching the spec from PFS or other
    potentially expensive operations.
    """

    pipeline: "Pipeline" = betterproto.message_field(1)
    version: int = betterproto.uint64_field(2)
    spec_commit: "_pfs__.Commit" = betterproto.message_field(3)
    stopped: bool = betterproto.bool_field(4)
    state: "PipelineState" = betterproto.enum_field(5)
    """state indicates the current state of the pipeline"""

    reason: str = betterproto.string_field(6)
    """reason includes any error messages associated with a failed pipeline"""

    last_job_state: "JobState" = betterproto.enum_field(8)
    """last_job_state indicates the state of the most recently created job"""

    parallelism: int = betterproto.uint64_field(9)
    """
    parallelism tracks the literal number of workers that this pipeline should
    run.
    """

    type: "PipelineInfoPipelineType" = betterproto.enum_field(10)
    auth_token: str = betterproto.string_field(11)
    details: "PipelineInfoDetails" = betterproto.message_field(12)
    user_spec_json: str = betterproto.string_field(13)
    effective_spec_json: str = betterproto.string_field(14)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline
var version : int
var spec_commit : Commit
var stopped : bool
var state : PipelineState

state indicates the current state of the pipeline

var reason : str

reason includes any error messages associated with a failed pipeline

var last_job_state : JobState

last_job_state indicates the state of the most recently created job

var parallelism : int

parallelism tracks the literal number of workers that this pipeline should run.

var type : PipelineInfoPipelineType
var auth_token : str
var details : PipelineInfoDetails
var user_spec_json : str
var effective_spec_json : str
class PipelineInfoDetails (transform: Transform = None, tf_job: TfJob = None, parallelism_spec: ParallelismSpec = None, egress: Egress = None, created_at: datetime.datetime = None, recent_error: str = None, workers_requested: int = None, workers_available: int = None, output_branch: str = None, resource_requests: ResourceSpec = None, resource_limits: ResourceSpec = None, sidecar_resource_limits: ResourceSpec = None, input: Input = None, description: str = None, salt: str = None, reason: str = None, service: Service = None, spout: Spout = None, datum_set_spec: DatumSetSpec = None, datum_timeout: datetime.timedelta = None, job_timeout: datetime.timedelta = None, datum_tries: int = None, scheduling_spec: SchedulingSpec = None, pod_spec: str = None, pod_patch: str = None, s3_out: bool = None, metadata: Metadata = None, reprocess_spec: str = None, unclaimed_tasks: int = None, worker_rc: str = None, autoscaling: bool = None, tolerations: List[ForwardRef('Toleration')] = None, sidecar_resource_requests: ResourceSpec = None, determined: Determined = None, maximum_expected_uptime: datetime.timedelta = None, workers_started_at: datetime.datetime = None)

PipelineInfoDetails(transform: 'Transform' = None, tf_job: 'TfJob' = None, parallelism_spec: 'ParallelismSpec' = None, egress: 'Egress' = None, created_at: datetime.datetime = None, recent_error: str = None, workers_requested: int = None, workers_available: int = None, output_branch: str = None, resource_requests: 'ResourceSpec' = None, resource_limits: 'ResourceSpec' = None, sidecar_resource_limits: 'ResourceSpec' = None, input: 'Input' = None, description: str = None, salt: str = None, reason: str = None, service: 'Service' = None, spout: 'Spout' = None, datum_set_spec: 'DatumSetSpec' = None, datum_timeout: datetime.timedelta = None, job_timeout: datetime.timedelta = None, datum_tries: int = None, scheduling_spec: 'SchedulingSpec' = None, pod_spec: str = None, pod_patch: str = None, s3_out: bool = None, metadata: 'Metadata' = None, reprocess_spec: str = None, unclaimed_tasks: int = None, worker_rc: str = None, autoscaling: bool = None, tolerations: List[ForwardRef('Toleration')] = None, sidecar_resource_requests: 'ResourceSpec' = None, determined: 'Determined' = None, maximum_expected_uptime: datetime.timedelta = None, workers_started_at: datetime.datetime = None)

Expand source code
@dataclass(eq=False, repr=False)
class PipelineInfoDetails(betterproto.Message):
    transform: "Transform" = betterproto.message_field(1)
    tf_job: "TfJob" = betterproto.message_field(2)
    """
    tf_job encodes a Kubeflow TFJob spec. Pachyderm uses this to create TFJobs
    when running in a kubernetes cluster on which kubeflow has been installed.
    Exactly one of 'tf_job' and 'transform' should be set
    """

    parallelism_spec: "ParallelismSpec" = betterproto.message_field(3)
    egress: "Egress" = betterproto.message_field(4)
    created_at: datetime = betterproto.message_field(5)
    recent_error: str = betterproto.string_field(6)
    workers_requested: int = betterproto.int64_field(7)
    workers_available: int = betterproto.int64_field(8)
    output_branch: str = betterproto.string_field(9)
    resource_requests: "ResourceSpec" = betterproto.message_field(10)
    resource_limits: "ResourceSpec" = betterproto.message_field(11)
    sidecar_resource_limits: "ResourceSpec" = betterproto.message_field(12)
    input: "Input" = betterproto.message_field(13)
    description: str = betterproto.string_field(14)
    salt: str = betterproto.string_field(16)
    reason: str = betterproto.string_field(17)
    service: "Service" = betterproto.message_field(19)
    spout: "Spout" = betterproto.message_field(20)
    datum_set_spec: "DatumSetSpec" = betterproto.message_field(21)
    datum_timeout: timedelta = betterproto.message_field(22)
    job_timeout: timedelta = betterproto.message_field(23)
    datum_tries: int = betterproto.int64_field(24)
    scheduling_spec: "SchedulingSpec" = betterproto.message_field(25)
    pod_spec: str = betterproto.string_field(26)
    pod_patch: str = betterproto.string_field(27)
    s3_out: bool = betterproto.bool_field(28)
    metadata: "Metadata" = betterproto.message_field(29)
    reprocess_spec: str = betterproto.string_field(30)
    unclaimed_tasks: int = betterproto.int64_field(31)
    worker_rc: str = betterproto.string_field(32)
    autoscaling: bool = betterproto.bool_field(33)
    tolerations: List["Toleration"] = betterproto.message_field(34)
    sidecar_resource_requests: "ResourceSpec" = betterproto.message_field(35)
    determined: "Determined" = betterproto.message_field(36)
    maximum_expected_uptime: timedelta = betterproto.message_field(37)
    workers_started_at: datetime = betterproto.message_field(38)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var transform : Transform
var tf_job : TfJob

tf_job encodes a Kubeflow TFJob spec. Pachyderm uses this to create TFJobs when running in a kubernetes cluster on which kubeflow has been installed. Exactly one of 'tf_job' and 'transform' should be set

var parallelism_spec : ParallelismSpec
var egress : Egress
var created_at : datetime.datetime
var recent_error : str
var workers_requested : int
var workers_available : int
var output_branch : str
var resource_requests : ResourceSpec
var resource_limits : ResourceSpec
var sidecar_resource_limits : ResourceSpec
var input : Input
var description : str
var salt : str
var reason : str
var service : Service
var spout : Spout
var datum_set_spec : DatumSetSpec
var datum_timeout : datetime.timedelta
var job_timeout : datetime.timedelta
var datum_tries : int
var scheduling_spec : SchedulingSpec
var pod_spec : str
var pod_patch : str
var s3_out : bool
var metadata : Metadata
var reprocess_spec : str
var unclaimed_tasks : int
var worker_rc : str
var autoscaling : bool
var tolerations : List[Toleration]
var sidecar_resource_requests : ResourceSpec
var determined : Determined
var maximum_expected_uptime : datetime.timedelta
var workers_started_at : datetime.datetime
class PipelineInfos (pipeline_info: List[ForwardRef('PipelineInfo')] = None)

PipelineInfos(pipeline_info: List[ForwardRef('PipelineInfo')] = None)

Expand source code
@dataclass(eq=False, repr=False)
class PipelineInfos(betterproto.Message):
    pipeline_info: List["PipelineInfo"] = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline_info : List[PipelineInfo]
class JobSet (id: str = None)

JobSet(id: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class JobSet(betterproto.Message):
    id: str = betterproto.string_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var id : str
class InspectJobSetRequest (job_set: JobSet = None, wait: bool = None, details: bool = None)

InspectJobSetRequest(job_set: 'JobSet' = None, wait: bool = None, details: bool = None)

Expand source code
@dataclass(eq=False, repr=False)
class InspectJobSetRequest(betterproto.Message):
    job_set: "JobSet" = betterproto.message_field(1)
    wait: bool = betterproto.bool_field(2)
    details: bool = betterproto.bool_field(3)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var job_set : JobSet
var wait : bool
var details : bool
class ListJobSetRequest (details: bool = None, projects: List[ForwardRef('_pfs__.Project')] = None, pagination_marker: datetime.datetime = None, number: int = None, reverse: bool = None, jq_filter: str = None)

ListJobSetRequest(details: bool = None, projects: List[ForwardRef('_pfs__.Project')] = None, pagination_marker: datetime.datetime = None, number: int = None, reverse: bool = None, jq_filter: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class ListJobSetRequest(betterproto.Message):
    details: bool = betterproto.bool_field(1)
    projects: List["_pfs__.Project"] = betterproto.message_field(2)
    """A list of projects to filter jobs on, nil means don't filter."""

    pagination_marker: datetime = betterproto.message_field(3)
    """
    we return job sets created before or after this time based on the reverse
    flag
    """

    number: int = betterproto.int64_field(4)
    """number of results to return"""

    reverse: bool = betterproto.bool_field(5)
    """if true, return results in reverse order"""

    jq_filter: str = betterproto.string_field(6)
    """A jq program string for additional result filtering"""

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var details : bool
var projects : List[Project]

A list of projects to filter jobs on, nil means don't filter.

var pagination_marker : datetime.datetime

we return job sets created before or after this time based on the reverse flag

var number : int

number of results to return

var reverse : bool

if true, return results in reverse order

var jq_filter : str

A jq program string for additional result filtering

class InspectJobRequest (job: Job = None, wait: bool = None, details: bool = None)

InspectJobRequest(job: 'Job' = None, wait: bool = None, details: bool = None)

Expand source code
@dataclass(eq=False, repr=False)
class InspectJobRequest(betterproto.Message):
    job: "Job" = betterproto.message_field(1)
    """Callers should set either Job or OutputCommit, not both."""

    wait: bool = betterproto.bool_field(2)
    details: bool = betterproto.bool_field(3)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var job : Job

Callers should set either Job or OutputCommit, not both.

var wait : bool
var details : bool
class ListJobRequest (projects: List[ForwardRef('_pfs__.Project')] = None, pipeline: Pipeline = None, input_commit: List[ForwardRef('_pfs__.Commit')] = None, history: int = None, details: bool = None, jq_filter: str = None, pagination_marker: datetime.datetime = None, number: int = None, reverse: bool = None)

ListJobRequest(projects: List[ForwardRef('_pfs__.Project')] = None, pipeline: 'Pipeline' = None, input_commit: List[ForwardRef('_pfs__.Commit')] = None, history: int = None, details: bool = None, jq_filter: str = None, pagination_marker: datetime.datetime = None, number: int = None, reverse: bool = None)

Expand source code
@dataclass(eq=False, repr=False)
class ListJobRequest(betterproto.Message):
    projects: List["_pfs__.Project"] = betterproto.message_field(7)
    """A list of projects to filter jobs on, nil means don't filter."""

    pipeline: "Pipeline" = betterproto.message_field(1)
    input_commit: List["_pfs__.Commit"] = betterproto.message_field(2)
    history: int = betterproto.int64_field(4)
    """
    History indicates whether to return jobs from historical versions of
    pipelines. Semantics are: 0: return jobs from the current version of the
    pipeline or pipelines; 1: return the above and jobs from the next most
    recent version; 2: etc.; -1: return jobs from all historical versions.
    """

    details: bool = betterproto.bool_field(5)
    """
    Details indicates whether the result should include all pipeline details in
    each JobInfo, or limited information including name and status, but
    excluding information in the pipeline spec. Leaving this "false" can make
    the call significantly faster in clusters with a large number of pipelines
    and jobs. Note that if 'input_commit' is set, this field is coerced to
    "true"
    """

    jq_filter: str = betterproto.string_field(6)
    """A jq program string for additional result filtering"""

    pagination_marker: datetime = betterproto.message_field(8)
    """timestamp that serves as the pagination marker"""

    number: int = betterproto.int64_field(9)
    """number of results to return"""

    reverse: bool = betterproto.bool_field(10)
    """flag to indicate whether results should be returned in reverse order"""

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var projects : List[Project]

A list of projects to filter jobs on, nil means don't filter.

var pipeline : Pipeline
var input_commit : List[Commit]
var history : int

History indicates whether to return jobs from historical versions of pipelines. Semantics are: 0: return jobs from the current version of the pipeline or pipelines; 1: return the above and jobs from the next most recent version; 2: etc.; -1: return jobs from all historical versions.

var details : bool

Details indicates whether the result should include all pipeline details in each JobInfo, or limited information including name and status, but excluding information in the pipeline spec. Leaving this "false" can make the call significantly faster in clusters with a large number of pipelines and jobs. Note that if 'input_commit' is set, this field is coerced to "true"

var jq_filter : str

A jq program string for additional result filtering

var pagination_marker : datetime.datetime

timestamp that serves as the pagination marker

var number : int

number of results to return

var reverse : bool

flag to indicate whether results should be returned in reverse order
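
A minimal sketch of a ListJobRequest; the project and pipeline names are placeholders:

from pachyderm_sdk.api import pfs
from pachyderm_sdk.api.pps import ListJobRequest, Pipeline

# Hypothetical request: jobs from all historical versions (history=-1) of
# one pipeline, capped at 50 results, without the expensive details.
request = ListJobRequest(
    pipeline=Pipeline(project=pfs.Project(name="default"), name="edges"),
    history=-1,
    details=False,
    number=50,
)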

class SubscribeJobRequest (pipeline: Pipeline = None, details: bool = None)

Streams open jobs until canceled

Expand source code
@dataclass(eq=False, repr=False)
class SubscribeJobRequest(betterproto.Message):
    """Streams open jobs until canceled"""

    pipeline: "Pipeline" = betterproto.message_field(1)
    details: bool = betterproto.bool_field(2)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline
var details : bool
class DeleteJobRequest (job: Job = None)

DeleteJobRequest(job: 'Job' = None)

Expand source code
@dataclass(eq=False, repr=False)
class DeleteJobRequest(betterproto.Message):
    job: "Job" = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var job : Job
class StopJobRequest (job: Job = None, reason: str = None)

StopJobRequest(job: 'Job' = None, reason: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class StopJobRequest(betterproto.Message):
    job: "Job" = betterproto.message_field(1)
    reason: str = betterproto.string_field(3)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var job : Job
var reason : str
class UpdateJobStateRequest (job: Job = None, state: JobState = None, reason: str = None, restart: int = None, data_processed: int = None, data_skipped: int = None, data_failed: int = None, data_recovered: int = None, data_total: int = None, stats: ProcessStats = None)

UpdateJobStateRequest(job: 'Job' = None, state: 'JobState' = None, reason: str = None, restart: int = None, data_processed: int = None, data_skipped: int = None, data_failed: int = None, data_recovered: int = None, data_total: int = None, stats: 'ProcessStats' = None)

Expand source code
@dataclass(eq=False, repr=False)
class UpdateJobStateRequest(betterproto.Message):
    job: "Job" = betterproto.message_field(1)
    state: "JobState" = betterproto.enum_field(2)
    reason: str = betterproto.string_field(3)
    restart: int = betterproto.uint64_field(5)
    data_processed: int = betterproto.int64_field(6)
    data_skipped: int = betterproto.int64_field(7)
    data_failed: int = betterproto.int64_field(8)
    data_recovered: int = betterproto.int64_field(9)
    data_total: int = betterproto.int64_field(10)
    stats: "ProcessStats" = betterproto.message_field(11)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var job : Job
var state : JobState
var reason : str
var restart : int
var data_processed : int
var data_skipped : int
var data_failed : int
var data_recovered : int
var data_total : int
var stats : ProcessStats
class GetLogsRequest (pipeline: Pipeline = None, job: Job = None, data_filters: List[str] = None, datum: Datum = None, master: bool = None, follow: bool = None, tail: int = None, use_loki_backend: bool = None, since: datetime.timedelta = None)

GetLogsRequest(pipeline: 'Pipeline' = None, job: 'Job' = None, data_filters: List[str] = None, datum: 'Datum' = None, master: bool = None, follow: bool = None, tail: int = None, use_loki_backend: bool = None, since: datetime.timedelta = None)

Expand source code
@dataclass(eq=False, repr=False)
class GetLogsRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    """
    The pipeline from which we want to get logs (required if the job in 'job'
    was created as part of a pipeline. To get logs from a non-orphan job
    without the pipeline that created it, you need to use ElasticSearch).
    """

    job: "Job" = betterproto.message_field(2)
    """The job from which we want to get logs."""

    data_filters: List[str] = betterproto.string_field(3)
    """
    Names of input files from which we want processing logs. This may contain
    multiple files, to query pipelines that contain multiple inputs. Each
    filter may be an absolute path of a file within a pps repo, or it may be a
    hash for that file (to search for files at specific versions)
    """

    datum: "Datum" = betterproto.message_field(4)
    master: bool = betterproto.bool_field(5)
    """If true get logs from the master process"""

    follow: bool = betterproto.bool_field(6)
    """Continue to follow new logs as they become available."""

    tail: int = betterproto.int64_field(7)
    """
    If nonzero, the number of lines from the end of the logs to return.  Note:
    tail applies per container, so you will get tail * <number of pods> total
    lines back.
    """

    use_loki_backend: bool = betterproto.bool_field(8)
    """
    UseLokiBackend causes the logs request to go through the loki backend
    rather than through kubernetes. This behavior can also be achieved by
    setting the LOKI_LOGGING feature flag.
    """

    since: timedelta = betterproto.message_field(9)
    """
    Since specifies how far in the past to return logs from. It defaults to 24
    hours.
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline

The pipeline from which we want to get logs (required if the job in 'job' was created as part of a pipeline. To get logs from a non-orphan job without the pipeline that created it, you need to use ElasticSearch).

var job : Job

The job from which we want to get logs.

var data_filters : List[str]

Names of input files from which we want processing logs. This may contain multiple files, to query pipelines that contain multiple inputs. Each filter may be an absolute path of a file within a pps repo, or it may be a hash for that file (to search for files at specific versions)

var datum : Datum
var master : bool

If true get logs from the master process

var follow : bool

Continue to follow new logs as they become available.

var tail : int

If nonzero, the number of lines from the end of the logs to return. Note: tail applies per container, so you will get tail * &lt;number of pods&gt; total lines back.

var use_loki_backend : bool

UseLokiBackend causes the logs request to go through the loki backend rather than through kubernetes. This behavior can also be achieved by setting the LOKI_LOGGING feature flag.

var since : datetime.timedelta

Since specifies how far in the past to return logs from. It defaults to 24 hours.
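
A minimal sketch of a GetLogsRequest that overrides the 24-hour default window; the project and pipeline names are placeholders:

from datetime import timedelta

from pachyderm_sdk.api import pfs
from pachyderm_sdk.api.pps import GetLogsRequest, Pipeline

# Follow new logs, returning at most 100 trailing lines per container from
# the past hour (instead of the 24-hour default).
request = GetLogsRequest(
    pipeline=Pipeline(project=pfs.Project(name="default"), name="edges"),
    follow=True,
    tail=100,
    since=timedelta(hours=1),
)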

class LogMessage (project_name: str = None, pipeline_name: str = None, job_id: str = None, worker_id: str = None, datum_id: str = None, master: bool = None, data: List[ForwardRef('InputFile')] = None, user: bool = None, ts: datetime.datetime = None, message: str = None)

LogMessage is a log line from a PPS worker, annotated with metadata indicating when and why the line was logged.

Expand source code
@dataclass(eq=False, repr=False)
class LogMessage(betterproto.Message):
    """
    LogMessage is a log line from a PPS worker, annotated with metadata
    indicating when and why the line was logged.
    """

    project_name: str = betterproto.string_field(10)
    """
    The job and pipeline for which a PFS file is being processed (if the job is
    an orphan job, pipeline name and ID will be unset)
    """

    pipeline_name: str = betterproto.string_field(1)
    job_id: str = betterproto.string_field(2)
    worker_id: str = betterproto.string_field(3)
    datum_id: str = betterproto.string_field(4)
    master: bool = betterproto.bool_field(5)
    data: List["InputFile"] = betterproto.message_field(6)
    """The PFS files being processed (one per pipeline/job input)"""

    user: bool = betterproto.bool_field(7)
    """User is true if the log message comes from the user's code."""

    ts: datetime = betterproto.message_field(8)
    """The message logged, and the time at which it was logged"""

    message: str = betterproto.string_field(9)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var project_name : str

The job and pipeline for which a PFS file is being processed (if the job is an orphan job, pipeline name and ID will be unset)

var pipeline_name : str
var job_id : str
var worker_id : str
var datum_id : str
var master : bool
var data : List[InputFile]

The PFS files being processed (one per pipeline/job input)

var user : bool

User is true if the log message comes from the user's code.

var ts : datetime.datetime

The message logged, and the time at which it was logged

var message : str
class RestartDatumRequest (job: Job = None, data_filters: List[str] = None)

RestartDatumRequest(job: 'Job' = None, data_filters: List[str] = None)

Expand source code
@dataclass(eq=False, repr=False)
class RestartDatumRequest(betterproto.Message):
    job: "Job" = betterproto.message_field(1)
    data_filters: List[str] = betterproto.string_field(2)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var job : Job
var data_filters : List[str]
class InspectDatumRequest (datum: Datum = None)

InspectDatumRequest(datum: 'Datum' = None)

Expand source code
@dataclass(eq=False, repr=False)
class InspectDatumRequest(betterproto.Message):
    datum: "Datum" = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var datum : Datum
class ListDatumRequest (job: Job = None, input: Input = None, filter: ListDatumRequestFilter = None, pagination_marker: str = None, number: int = None, reverse: bool = None)

ListDatumRequest(job: 'Job' = None, input: 'Input' = None, filter: 'ListDatumRequestFilter' = None, pagination_marker: str = None, number: int = None, reverse: bool = None)

Expand source code
@dataclass(eq=False, repr=False)
class ListDatumRequest(betterproto.Message):
    job: "Job" = betterproto.message_field(1)
    """
    Job and Input are two different ways to specify the datums you want. Only
    one can be set. Job is the job to list datums from.
    """

    input: "Input" = betterproto.message_field(2)
    """
    Input is the input to list datums from. The datums listed are the ones that
    would be run if a pipeline was created with the provided input.
    """

    filter: "ListDatumRequestFilter" = betterproto.message_field(3)
    pagination_marker: str = betterproto.string_field(4)
    """datum ID to start from; this datum is not included in the response"""

    number: int = betterproto.int64_field(5)
    """Number of datums to return"""

    reverse: bool = betterproto.bool_field(6)
    """If true, return datums in reverse order"""

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var job : Job

Job and Input are two different ways to specify the datums you want. Only one can be set. Job is the job to list datums from.

var input : Input

Input is the input to list datums from. The datums listed are the ones that would be run if a pipeline was created with the provided input.

var filter : ListDatumRequestFilter
var pagination_marker : str

datum ID to start from; this datum is not included in the response

var number : int

Number of datums to return

var reverse : bool

If true, return datums in reverse order
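
A minimal sketch of paging through a job's datums; the project, pipeline, and job ID are placeholders:

from pachyderm_sdk.api import pfs
from pachyderm_sdk.api.pps import Job, ListDatumRequest, Pipeline

job = Job(
    pipeline=Pipeline(project=pfs.Project(name="default"), name="edges"),
    id="0123456789ab",  # illustrative job ID
)
first_page = ListDatumRequest(job=job, number=100)
# For the next page, pass the ID of the last datum received; that datum is
# not repeated in the response.
next_page = ListDatumRequest(job=job, number=100, pagination_marker="<last datum ID>")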

class ListDatumRequestFilter (state: List[ForwardRef('DatumState')] = None)

Filter restricts returned DatumInfo messages to those which match all of the filtered attributes.

Expand source code
@dataclass(eq=False, repr=False)
class ListDatumRequestFilter(betterproto.Message):
    """
    Filter restricts returned DatumInfo messages to those which match all of
    the filtered attributes.
    """

    state: List["DatumState"] = betterproto.enum_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var state : List[DatumState]
class CreateDatumRequest (input: Input = None, number: int = None)

Emits a stream of datums as they are created from the given input. Client must cancel the stream when it no longer wants to receive datums.

Expand source code
@dataclass(eq=False, repr=False)
class CreateDatumRequest(betterproto.Message):
    """
    Emits a stream of datums as they are created from the given input. Client
    must cancel the stream when it no longer wants to receive datums.
    """

    input: "Input" = betterproto.message_field(1)
    """
    Input is the input to list datums from. The datums listed are the ones
    that would be run if a pipeline was created with the provided input. The
    input field is only required for the first request. The server ignores
    subsequent requests' input field.
    """

    number: int = betterproto.int64_field(2)
    """Number of datums to return in next response"""

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var input : Input

Input is the input to list datums from. The datums listed are the ones that would be run if a pipeline was created with the provided input. The input field is only required for the first request. The server ignores subsequent requests' input field.

var number : int

Number of datums to return in next response
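
A minimal sketch of the request stream; the repo and glob are placeholders:

from pachyderm_sdk.api.pps import CreateDatumRequest, Input, PfsInput

# The first message carries the input; later messages may set only
# 'number', since the server ignores 'input' after the first request.
first = CreateDatumRequest(
    input=Input(pfs=PfsInput(repo="images", glob="/*")),  # illustrative
    number=10,
)
more = CreateDatumRequest(number=10)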

class DatumSetSpec (number: int = None, size_bytes: int = None, per_worker: int = None)

DatumSetSpec specifies how a pipeline should split its datums into datum sets.

Expand source code
@dataclass(eq=False, repr=False)
class DatumSetSpec(betterproto.Message):
    """
    DatumSetSpec specifies how a pipeline should split its datums into datum
    sets.
    """

    number: int = betterproto.int64_field(1)
    """
    number, if nonzero, specifies that each datum set should contain `number`
    datums. Datum sets may contain fewer if the total number of datums
    doesn't divide evenly.
    """

    size_bytes: int = betterproto.int64_field(2)
    """
    size_bytes, if nonzero, specifies a target size for each datum set. Datum
    sets may be larger or smaller than size_bytes, but will usually be pretty
    close to size_bytes in size.
    """

    per_worker: int = betterproto.int64_field(3)
    """
    per_worker, if nonzero, specifies how many datum sets should be created for
    each worker. It can't be set with number or size_bytes.
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var number : int

number, if nonzero, specifies that each datum set should contain 'number' datums. Datum sets may contain fewer if the total number of datums doesn't divide evenly.

var size_bytes : int

size_bytes, if nonzero, specifies a target size for each datum set. Datum sets may be larger or smaller than size_bytes, but will usually be pretty close to size_bytes in size.

var per_worker : int

per_worker, if nonzero, specifies how many datum sets should be created for each worker. It can't be set with number or size_bytes.
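
A minimal sketch of the three ways to split datums into sets (per the field docs, per_worker can't be combined with number or size_bytes):

from pachyderm_sdk.api.pps import DatumSetSpec

by_count = DatumSetSpec(number=10)                # ten datums per set
by_size = DatumSetSpec(size_bytes=100 * 1024**2)  # target ~100MiB per set
by_worker = DatumSetSpec(per_worker=2)            # two datum sets per worker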

class SchedulingSpec (node_selector: Dict[str, str] = None, priority_class_name: str = None)

SchedulingSpec(node_selector: Dict[str, str] = None, priority_class_name: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class SchedulingSpec(betterproto.Message):
    node_selector: Dict[str, str] = betterproto.map_field(
        1, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    )
    priority_class_name: str = betterproto.string_field(2)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var node_selector : Dict[str, str]
var priority_class_name : str
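
A minimal sketch; the node label and priority class are illustrative Kubernetes values, not Pachyderm defaults:

from pachyderm_sdk.api.pps import SchedulingSpec

scheduling = SchedulingSpec(
    node_selector={"disktype": "ssd"},    # illustrative node label
    priority_class_name="high-priority",  # illustrative priority class
)
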
class RerunPipelineRequest (pipeline: Pipeline = None, reprocess: bool = None)

RerunPipelineRequest(pipeline: 'Pipeline' = None, reprocess: bool = None)

Expand source code
@dataclass(eq=False, repr=False)
class RerunPipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    reprocess: bool = betterproto.bool_field(15)
    """Reprocess forces the pipeline to reprocess all datums."""

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline
var reprocess : bool

Reprocess forces the pipeline to reprocess all datums.

class CreatePipelineRequest (pipeline: Pipeline = None, tf_job: TfJob = None, transform: Transform = None, parallelism_spec: ParallelismSpec = None, egress: Egress = None, update: bool = None, output_branch: str = None, s3_out: bool = None, resource_requests: ResourceSpec = None, resource_limits: ResourceSpec = None, sidecar_resource_limits: ResourceSpec = None, input: Input = None, description: str = None, reprocess: bool = None, service: Service = None, spout: Spout = None, datum_set_spec: DatumSetSpec = None, datum_timeout: datetime.timedelta = None, job_timeout: datetime.timedelta = None, salt: str = None, datum_tries: int = None, scheduling_spec: SchedulingSpec = None, pod_spec: str = None, pod_patch: str = None, spec_commit: _pfs__.Commit = None, metadata: Metadata = None, reprocess_spec: str = None, autoscaling: bool = None, tolerations: List[ForwardRef('Toleration')] = None, sidecar_resource_requests: ResourceSpec = None, dry_run: bool = None, determined: Determined = None, maximum_expected_uptime: datetime.timedelta = None)

CreatePipelineRequest(pipeline: 'Pipeline' = None, tf_job: 'TfJob' = None, transform: 'Transform' = None, parallelism_spec: 'ParallelismSpec' = None, egress: 'Egress' = None, update: bool = None, output_branch: str = None, s3_out: bool = None, resource_requests: 'ResourceSpec' = None, resource_limits: 'ResourceSpec' = None, sidecar_resource_limits: 'ResourceSpec' = None, input: 'Input' = None, description: str = None, reprocess: bool = None, service: 'Service' = None, spout: 'Spout' = None, datum_set_spec: 'DatumSetSpec' = None, datum_timeout: datetime.timedelta = None, job_timeout: datetime.timedelta = None, salt: str = None, datum_tries: int = None, scheduling_spec: 'SchedulingSpec' = None, pod_spec: str = None, pod_patch: str = None, spec_commit: '_pfs__.Commit' = None, metadata: 'Metadata' = None, reprocess_spec: str = None, autoscaling: bool = None, tolerations: List[ForwardRef('Toleration')] = None, sidecar_resource_requests: 'ResourceSpec' = None, dry_run: bool = None, determined: 'Determined' = None, maximum_expected_uptime: datetime.timedelta = None)

Expand source code
@dataclass(eq=False, repr=False)
class CreatePipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    tf_job: "TfJob" = betterproto.message_field(2)
    """
    tf_job encodes a Kubeflow TFJob spec. Pachyderm uses this to create TFJobs
    when running in a Kubernetes cluster on which Kubeflow has been installed.
    Exactly one of 'tf_job' and 'transform' should be set.
    """

    transform: "Transform" = betterproto.message_field(3)
    parallelism_spec: "ParallelismSpec" = betterproto.message_field(4)
    egress: "Egress" = betterproto.message_field(5)
    update: bool = betterproto.bool_field(6)
    output_branch: str = betterproto.string_field(7)
    s3_out: bool = betterproto.bool_field(8)
    """
    s3_out, if set, requires a pipeline's user to write to its output repo via
    Pachyderm's s3 gateway (if set, workers will serve Pachyderm's s3 gateway
    API at http://<pipeline>-s3.<namespace>/<job id>.out/my/file). In this mode
    /pfs/out won't be walked or uploaded, and the s3 gateway service in the
    workers will allow writes to the job's output commit
    """

    resource_requests: "ResourceSpec" = betterproto.message_field(9)
    resource_limits: "ResourceSpec" = betterproto.message_field(10)
    sidecar_resource_limits: "ResourceSpec" = betterproto.message_field(11)
    input: "Input" = betterproto.message_field(12)
    description: str = betterproto.string_field(13)
    reprocess: bool = betterproto.bool_field(15)
    """
    Reprocess forces the pipeline to reprocess all datums. It only has meaning
    if Update is true
    """

    service: "Service" = betterproto.message_field(17)
    spout: "Spout" = betterproto.message_field(18)
    datum_set_spec: "DatumSetSpec" = betterproto.message_field(19)
    datum_timeout: timedelta = betterproto.message_field(20)
    job_timeout: timedelta = betterproto.message_field(21)
    salt: str = betterproto.string_field(22)
    datum_tries: int = betterproto.int64_field(23)
    scheduling_spec: "SchedulingSpec" = betterproto.message_field(24)
    pod_spec: str = betterproto.string_field(25)
    pod_patch: str = betterproto.string_field(26)
    spec_commit: "_pfs__.Commit" = betterproto.message_field(27)
    metadata: "Metadata" = betterproto.message_field(28)
    reprocess_spec: str = betterproto.string_field(29)
    autoscaling: bool = betterproto.bool_field(30)
    tolerations: List["Toleration"] = betterproto.message_field(34)
    sidecar_resource_requests: "ResourceSpec" = betterproto.message_field(35)
    dry_run: bool = betterproto.bool_field(37)
    determined: "Determined" = betterproto.message_field(38)
    maximum_expected_uptime: timedelta = betterproto.message_field(39)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline
var tf_job : TfJob

tf_job encodes a Kubeflow TFJob spec. Pachyderm uses this to create TFJobs when running in a Kubernetes cluster on which Kubeflow has been installed. Exactly one of 'tf_job' and 'transform' should be set.

var transform : Transform
var parallelism_spec : ParallelismSpec
var egress : Egress
var update : bool
var output_branch : str
var s3_out : bool

s3_out, if set, requires a pipeline's user to write to its output repo via Pachyderm's s3 gateway (if set, workers will serve Pachyderm's s3 gateway API at http://<pipeline>-s3.<namespace>/<job id>.out/my/file). In this mode /pfs/out won't be walked or uploaded, and the s3 gateway service in the workers will allow writes to the job's output commit

var resource_requests : ResourceSpec
var resource_limits : ResourceSpec
var sidecar_resource_limits : ResourceSpec
var input : Input
var description : str
var reprocess : bool

Reprocess forces the pipeline to reprocess all datums. It only has meaning if Update is true

var service : Service
var spout : Spout
var datum_set_spec : DatumSetSpec
var datum_timeout : datetime.timedelta
var job_timeout : datetime.timedelta
var salt : str
var datum_tries : int
var scheduling_spec : SchedulingSpec
var pod_spec : str
var pod_patch : str
var spec_commit : Commit
var metadata : Metadata
var reprocess_spec : str
var autoscaling : bool
var tolerations : List[Toleration]
var sidecar_resource_requests : ResourceSpec
var dry_run : bool
var determined : Determined
var maximum_expected_uptime : datetime.timedelta
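
A minimal sketch of creating a transform pipeline through ApiStub.create_pipeline (defined below); the pipeline name, image, command, and input repo are all hypothetical:

from datetime import timedelta

from pachyderm_sdk.api.pps import Pipeline, Transform, Input, PfsInput

# Hypothetical pipeline: process every file of the "docs" repo.
stub.create_pipeline(
    pipeline=Pipeline(name="word-count"),
    transform=Transform(
        image="python:3.11-slim",
        cmd=["python3", "/app/count.py"],  # hypothetical entrypoint
    ),
    input=Input(pfs=PfsInput(repo="docs", glob="/*")),
    datum_timeout=timedelta(minutes=10),   # timedelta maps onto the duration field
)

Note that exactly one of transform and tf_job is set, matching the field documentation above.
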
class CreatePipelineV2Request (create_pipeline_request_json: str = None, dry_run: bool = None, update: bool = None, reprocess: bool = None)

CreatePipelineV2Request(create_pipeline_request_json: str = None, dry_run: bool = None, update: bool = None, reprocess: bool = None)

Expand source code
@dataclass(eq=False, repr=False)
class CreatePipelineV2Request(betterproto.Message):
    create_pipeline_request_json: str = betterproto.string_field(1)
    dry_run: bool = betterproto.bool_field(2)
    update: bool = betterproto.bool_field(3)
    reprocess: bool = betterproto.bool_field(4)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var create_pipeline_request_json : str
var dry_run : bool
var update : bool
var reprocess : bool
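
CreatePipelineV2 takes the pipeline spec as a JSON string instead of typed fields, so cluster and project defaults can be merged into it server-side. A sketch with a hypothetical spec; with dry_run set, the call should only report the effective spec rather than create anything:

import json

spec = {
    "pipeline": {"name": "word-count"},  # hypothetical spec
    "transform": {"image": "python:3.11-slim", "cmd": ["python3", "/app/count.py"]},
    "input": {"pfs": {"repo": "docs", "glob": "/*"}},
}
response = stub.create_pipeline_v2(
    create_pipeline_request_json=json.dumps(spec),
    dry_run=True,
)
print(response.effective_create_pipeline_request_json)
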
class CreatePipelineV2Response (effective_create_pipeline_request_json: str = None)

CreatePipelineV2Response(effective_create_pipeline_request_json: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class CreatePipelineV2Response(betterproto.Message):
    effective_create_pipeline_request_json: str = betterproto.string_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var effective_create_pipeline_request_json : str
class InspectPipelineRequest (pipeline: Pipeline = None, details: bool = None)

InspectPipelineRequest(pipeline: 'Pipeline' = None, details: bool = None)

Expand source code
@dataclass(eq=False, repr=False)
class InspectPipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    details: bool = betterproto.bool_field(2)
    """
    When true, return PipelineInfos with the details field, which requires
    loading the pipeline spec from PFS.
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline
var details : bool

When true, return PipelineInfos with the details field, which requires loading the pipeline spec from PFS.
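
Because details requires a spec load from PFS, request it only when needed. A sketch with a hypothetical pipeline name:

from pachyderm_sdk.api.pps import Pipeline

info = stub.inspect_pipeline(pipeline=Pipeline(name="word-count"), details=True)
print(info.state, info.details)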

class ListPipelineRequest (pipeline: Pipeline = None, history: int = None, details: bool = None, jq_filter: str = None, commit_set: _pfs__.CommitSet = None, projects: List[ForwardRef('_pfs__.Project')] = None)

ListPipelineRequest(pipeline: 'Pipeline' = None, history: int = None, details: bool = None, jq_filter: str = None, commit_set: '_pfs__.CommitSet' = None, projects: List[ForwardRef('_pfs__.Project')] = None)

Expand source code
@dataclass(eq=False, repr=False)
class ListPipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    """
    If non-nil, only return info about a single pipeline; this is redundant
    with InspectPipeline unless history is non-zero.
    """

    history: int = betterproto.int64_field(2)
    """
    History indicates how many historical versions you want returned. Its
    semantics are: 0: Return the current version of the pipeline or pipelines.
    1: Return the above and the next most recent version. 2: etc. -1: Return
    all historical versions.
    """

    details: bool = betterproto.bool_field(3)
    """Deprecated: Details are always returned."""

    jq_filter: str = betterproto.string_field(4)
    """A jq program string for additional result filtering"""

    commit_set: "_pfs__.CommitSet" = betterproto.message_field(5)
    """If non-nil, will return all the pipeline infos at this commit set"""

    projects: List["_pfs__.Project"] = betterproto.message_field(6)
    """
    Projects to filter on. Empty list means no filter, so return all pipelines.
    """

    def __post_init__(self) -> None:
        super().__post_init__()
        if self.is_set("details"):
            warnings.warn(
                "ListPipelineRequest.details is deprecated", DeprecationWarning
            )

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline

If non-nil, only return info about a single pipeline; this is redundant with InspectPipeline unless history is non-zero.

var history : int

History indicates how many historical versions you want returned. Its semantics are: 0: Return the current version of the pipeline or pipelines. 1: Return the above and the next most recent version. 2: etc. -1: Return all historical versions.

var details : bool

Deprecated: Details are always returned.

var jq_filter : str

A jq program string for additional result filtering

var commit_set : CommitSet

If non-nil, will return all the pipeline infos at this commit set

var projects : List[Project]

Projects to filter on. Empty list means no filter, so return all pipelines.
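
A sketch that lists every historical version of one hypothetical pipeline, using history=-1 per the semantics above:

from pachyderm_sdk.api.pps import Pipeline

for info in stub.list_pipeline(pipeline=Pipeline(name="word-count"), history=-1):
    print(info.version, info.state)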

class DeletePipelineRequest (pipeline: Pipeline = None, all: bool = None, force: bool = None, keep_repo: bool = None, must_exist: bool = None)

Delete a pipeline. If the deprecated all member is true, then delete all pipelines in the default project.

Expand source code
@dataclass(eq=False, repr=False)
class DeletePipelineRequest(betterproto.Message):
    """
    Delete a pipeline.  If the deprecated all member is true, then delete all
    pipelines in the default project.
    """

    pipeline: "Pipeline" = betterproto.message_field(1)
    all: bool = betterproto.bool_field(2)
    force: bool = betterproto.bool_field(3)
    keep_repo: bool = betterproto.bool_field(4)
    must_exist: bool = betterproto.bool_field(5)
    """If true, an error will be returned if the pipeline doesn't exist."""

    def __post_init__(self) -> None:
        super().__post_init__()
        if self.is_set("all"):
            warnings.warn("DeletePipelineRequest.all is deprecated", DeprecationWarning)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline
var all : bool
var force : bool
var keep_repo : bool
var must_exist : bool

If true, an error will be returned if the pipeline doesn't exist.
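
Prefer naming the pipeline explicitly over the deprecated all flag. A sketch with a hypothetical pipeline name:

from pachyderm_sdk.api.pps import Pipeline

stub.delete_pipeline(
    pipeline=Pipeline(name="word-count"),
    keep_repo=True,   # keep the output repo and its data
    must_exist=True,  # error if the pipeline is already gone
)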

class DeletePipelinesRequest (projects: List[ForwardRef('_pfs__.Project')] = None, force: bool = None, keep_repo: bool = None, all: bool = None)

Delete more than one pipeline.

Expand source code
@dataclass(eq=False, repr=False)
class DeletePipelinesRequest(betterproto.Message):
    """Delete more than one pipeline."""

    projects: List["_pfs__.Project"] = betterproto.message_field(1)
    """
    All pipelines in each project will be deleted if the caller has permission.
    """

    force: bool = betterproto.bool_field(2)
    keep_repo: bool = betterproto.bool_field(3)
    all: bool = betterproto.bool_field(4)
    """
    If set, all pipelines in all projects will be deleted if the caller has
    permission.
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var projects : List[Project]

All pipelines in each project will be deleted if the caller has permission.

var force : bool
var keep_repo : bool
var all : bool

If set, all pipelines in all projects will be deleted if the caller has permission.

class DeletePipelinesResponse (pipelines: List[ForwardRef('Pipeline')] = None)

DeletePipelinesResponse(pipelines: List[ForwardRef('Pipeline')] = None)

Expand source code
@dataclass(eq=False, repr=False)
class DeletePipelinesResponse(betterproto.Message):
    pipelines: List["Pipeline"] = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipelines : List[Pipeline]
class StartPipelineRequest (pipeline: Pipeline = None)

StartPipelineRequest(pipeline: 'Pipeline' = None)

Expand source code
@dataclass(eq=False, repr=False)
class StartPipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline
class StopPipelineRequest (pipeline: Pipeline = None, must_exist: bool = None)

StopPipelineRequest(pipeline: 'Pipeline' = None, must_exist: bool = None)

Expand source code
@dataclass(eq=False, repr=False)
class StopPipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    must_exist: bool = betterproto.bool_field(2)
    """If true, an error will be returned if the pipeline doesn't exist."""

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline
var must_exist : bool

If true, an error will be returned if the pipeline doesn't exist.

class RunPipelineRequest (pipeline: Pipeline = None, provenance: List[ForwardRef('_pfs__.Commit')] = None, job_id: str = None)

RunPipelineRequest(pipeline: 'Pipeline' = None, provenance: List[ForwardRef('_pfs__.Commit')] = None, job_id: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class RunPipelineRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)
    provenance: List["_pfs__.Commit"] = betterproto.message_field(2)
    job_id: str = betterproto.string_field(3)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline
var provenance : List[Commit]
var job_id : str
class RunCronRequest (pipeline: Pipeline = None)

RunCronRequest(pipeline: 'Pipeline' = None)

Expand source code
@dataclass(eq=False, repr=False)
class RunCronRequest(betterproto.Message):
    pipeline: "Pipeline" = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var pipeline : Pipeline
class CheckStatusRequest (all: bool = None, project: _pfs__.Project = None)

Request to check the status of pipelines within a project.

Expand source code
@dataclass(eq=False, repr=False)
class CheckStatusRequest(betterproto.Message):
    """Request to check the status of pipelines within a project."""

    all: bool = betterproto.bool_field(1, group="context")
    """If true, check the status of pipelines in all projects."""

    project: "_pfs__.Project" = betterproto.message_field(2, group="context")
    """The project whose pipelines to check."""

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var all : bool

If true, check the status of pipelines in all projects.

var project : Project

The project whose pipelines to check.

class CheckStatusResponse (project: _pfs__.Project = None, pipeline: Pipeline = None, alerts: List[str] = None)

Response for check status request. Provides alerts if any.

Expand source code
@dataclass(eq=False, repr=False)
class CheckStatusResponse(betterproto.Message):
    """Response for check status request. Provides alerts if any."""

    project: "_pfs__.Project" = betterproto.message_field(1)
    """project field"""

    pipeline: "Pipeline" = betterproto.message_field(2)
    """pipeline field"""

    alerts: List[str] = betterproto.string_field(3)
    """alert indicators"""

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var project : Project

The project the pipeline belongs to.

var pipeline : Pipeline

The pipeline whose status was checked.

var alerts : List[str]

Alert messages for the pipeline, if any.
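
A sketch that streams status across all projects and prints any alerts:

# Each response carries the project, the pipeline, and its alert strings.
for status in stub.check_status(all=True):
    if status.alerts:
        print(status.project, status.pipeline, status.alerts)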

class CreateSecretRequest (file: bytes = None)

CreateSecretRequest(file: bytes = None)

Expand source code
@dataclass(eq=False, repr=False)
class CreateSecretRequest(betterproto.Message):
    file: bytes = betterproto.bytes_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var file : bytes
class DeleteSecretRequest (secret: Secret = None)

DeleteSecretRequest(secret: 'Secret' = None)

Expand source code
@dataclass(eq=False, repr=False)
class DeleteSecretRequest(betterproto.Message):
    secret: "Secret" = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var secret : Secret
class InspectSecretRequest (secret: Secret = None)

InspectSecretRequest(secret: 'Secret' = None)

Expand source code
@dataclass(eq=False, repr=False)
class InspectSecretRequest(betterproto.Message):
    secret: "Secret" = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var secret : Secret
class Secret (name: str = None)

Secret(name: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class Secret(betterproto.Message):
    name: str = betterproto.string_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var name : str
class SecretInfo (secret: Secret = None, type: str = None, creation_timestamp: datetime.datetime = None)

SecretInfo(secret: 'Secret' = None, type: str = None, creation_timestamp: datetime.datetime = None)

Expand source code
@dataclass(eq=False, repr=False)
class SecretInfo(betterproto.Message):
    secret: "Secret" = betterproto.message_field(1)
    type: str = betterproto.string_field(2)
    creation_timestamp: datetime = betterproto.message_field(3)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var secret : Secret
var type : str
var creation_timestamp : datetime.datetime
class SecretInfos (secret_info: List[ForwardRef('SecretInfo')] = None)

SecretInfos(secret_info: List[ForwardRef('SecretInfo')] = None)

Expand source code
@dataclass(eq=False, repr=False)
class SecretInfos(betterproto.Message):
    secret_info: List["SecretInfo"] = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var secret_info : List[SecretInfo]
class ActivateAuthRequest

ActivateAuthRequest()

Expand source code
@dataclass(eq=False, repr=False)
class ActivateAuthRequest(betterproto.Message):
    pass

Ancestors

  • betterproto.Message
  • abc.ABC
class ActivateAuthResponse

ActivateAuthResponse()

Expand source code
@dataclass(eq=False, repr=False)
class ActivateAuthResponse(betterproto.Message):
    pass

Ancestors

  • betterproto.Message
  • abc.ABC
class RunLoadTestRequest (dag_spec: str = None, load_spec: str = None, seed: int = None, parallelism: int = None, pod_patch: str = None, state_id: str = None)

RunLoadTestRequest(dag_spec: str = None, load_spec: str = None, seed: int = None, parallelism: int = None, pod_patch: str = None, state_id: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class RunLoadTestRequest(betterproto.Message):
    dag_spec: str = betterproto.string_field(1)
    load_spec: str = betterproto.string_field(2)
    seed: int = betterproto.int64_field(3)
    parallelism: int = betterproto.int64_field(4)
    pod_patch: str = betterproto.string_field(5)
    state_id: str = betterproto.string_field(6)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var dag_spec : str
var load_spec : str
var seed : int
var parallelism : int
var pod_patch : str
var state_id : str
class RunLoadTestResponse (error: str = None, state_id: str = None)

RunLoadTestResponse(error: str = None, state_id: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class RunLoadTestResponse(betterproto.Message):
    error: str = betterproto.string_field(1)
    state_id: str = betterproto.string_field(2)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var error : str
var state_id : str
class RenderTemplateRequest (template: str = None, args: Dict[str, str] = None)

RenderTemplateRequest(template: str = None, args: Dict[str, str] = None)

Expand source code
@dataclass(eq=False, repr=False)
class RenderTemplateRequest(betterproto.Message):
    template: str = betterproto.string_field(1)
    args: Dict[str, str] = betterproto.map_field(
        2, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    )

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var template : str
var args : Dict[str, str]
class RenderTemplateResponse (json: str = None, specs: List[ForwardRef('CreatePipelineRequest')] = None)

RenderTemplateResponse(json: str = None, specs: List[ForwardRef('CreatePipelineRequest')] = None)

Expand source code
@dataclass(eq=False, repr=False)
class RenderTemplateResponse(betterproto.Message):
    json: str = betterproto.string_field(1)
    specs: List["CreatePipelineRequest"] = betterproto.message_field(2)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var json : str
var specs : List[CreatePipelineRequest]
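
RenderTemplate expands a pipeline template with the given arguments and returns both the rendered JSON and the parsed specs. A sketch; the template body and argument are hypothetical:

response = stub.render_template(
    template='function(name) {pipeline: {name: name}}',  # hypothetical template
    args={"name": "word-count"},
)
print(response.json)
for spec in response.specs:
    print(spec.pipeline)
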
class LokiRequest (since: datetime.timedelta = None, query: str = None)

LokiRequest(since: datetime.timedelta = None, query: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class LokiRequest(betterproto.Message):
    since: timedelta = betterproto.message_field(1)
    query: str = betterproto.string_field(2)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var since : datetime.timedelta
var query : str
class LokiLogMessage (message: str = None)

LokiLogMessage(message: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class LokiLogMessage(betterproto.Message):
    message: str = betterproto.string_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var message : str
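
Both GetKubeEvents and QueryLoki take a LokiRequest and stream back LokiLogMessage values. A sketch that tails the last hour of kube events; the query string is a hypothetical Loki-style selector:

from datetime import timedelta

for msg in stub.get_kube_events(since=timedelta(hours=1), query='{app="pachd"}'):
    print(msg.message)
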
class ClusterDefaults (create_pipeline_request: CreatePipelineRequest = None)

ClusterDefaults(create_pipeline_request: 'CreatePipelineRequest' = None)

Expand source code
@dataclass(eq=False, repr=False)
class ClusterDefaults(betterproto.Message):
    create_pipeline_request: "CreatePipelineRequest" = betterproto.message_field(3)
    """
    CreatePipelineRequest contains the default JSON CreatePipelineRequest into
    which pipeline specs are merged to form the effective spec used to create a
    pipeline.
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var create_pipeline_request : CreatePipelineRequest

CreatePipelineRequest contains the default JSON CreatePipelineRequest into which pipeline specs are merged to form the effective spec used to create a pipeline.

class GetClusterDefaultsRequest

GetClusterDefaultsRequest()

Expand source code
@dataclass(eq=False, repr=False)
class GetClusterDefaultsRequest(betterproto.Message):
    pass

Ancestors

  • betterproto.Message
  • abc.ABC
class GetClusterDefaultsResponse (cluster_defaults_json: str = None)

GetClusterDefaultsResponse(cluster_defaults_json: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class GetClusterDefaultsResponse(betterproto.Message):
    cluster_defaults_json: str = betterproto.string_field(2)
    """
    A JSON-encoded ClusterDefaults message; this is the verbatim input passed
    to SetClusterDefaults.
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var cluster_defaults_json : str

A JSON-encoded ClusterDefaults message; this is the verbatim input passed to SetClusterDefaults.

class SetClusterDefaultsRequest (regenerate: bool = None, reprocess: bool = None, dry_run: bool = None, cluster_defaults_json: str = None)

SetClusterDefaultsRequest(regenerate: bool = None, reprocess: bool = None, dry_run: bool = None, cluster_defaults_json: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class SetClusterDefaultsRequest(betterproto.Message):
    regenerate: bool = betterproto.bool_field(2)
    reprocess: bool = betterproto.bool_field(3)
    dry_run: bool = betterproto.bool_field(4)
    cluster_defaults_json: str = betterproto.string_field(5)
    """
    A JSON-encoded ClusterDefaults message; this will be stored verbatim.
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var regenerate : bool
var reprocess : bool
var dry_run : bool
var cluster_defaults_json : str

A JSON-encoded ClusterDefaults message; this will be stored verbatim.

class SetClusterDefaultsResponse (affected_pipelines: List[ForwardRef('Pipeline')] = None)

SetClusterDefaultsResponse(affected_pipelines: List[ForwardRef('Pipeline')] = None)

Expand source code
@dataclass(eq=False, repr=False)
class SetClusterDefaultsResponse(betterproto.Message):
    affected_pipelines: List["Pipeline"] = betterproto.message_field(2)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var affected_pipelines : List[Pipeline]
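
Cluster defaults are stored as verbatim JSON and merged into every pipeline spec. A sketch that previews the effect of a new default before applying it; the default shown is hypothetical:

import json

current = stub.get_cluster_defaults().cluster_defaults_json  # verbatim JSON

defaults = {"create_pipeline_request": {"datum_tries": 3}}   # hypothetical default
response = stub.set_cluster_defaults(
    cluster_defaults_json=json.dumps(defaults),
    regenerate=True,  # regenerate affected pipeline specs from the new defaults
    dry_run=True,     # report affected pipelines without changing anything
)
print([p.name for p in response.affected_pipelines])
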
class CreatePipelineTransaction (create_pipeline_request: CreatePipelineRequest = None, user_json: str = None, effective_json: str = None)

CreatePipelineTransaction(create_pipeline_request: 'CreatePipelineRequest' = None, user_json: str = None, effective_json: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class CreatePipelineTransaction(betterproto.Message):
    create_pipeline_request: "CreatePipelineRequest" = betterproto.message_field(1)
    user_json: str = betterproto.string_field(2)
    effective_json: str = betterproto.string_field(3)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var create_pipeline_request : CreatePipelineRequest
var user_json : str
var effective_json : str
class ProjectDefaults (create_pipeline_request: CreatePipelineRequest = None)

ProjectDefaults(create_pipeline_request: 'CreatePipelineRequest' = None)

Expand source code
@dataclass(eq=False, repr=False)
class ProjectDefaults(betterproto.Message):
    create_pipeline_request: "CreatePipelineRequest" = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var create_pipeline_request : CreatePipelineRequest
class GetProjectDefaultsRequest (project: _pfs__.Project = None)

GetProjectDefaultsRequest(project: '_pfs__.Project' = None)

Expand source code
@dataclass(eq=False, repr=False)
class GetProjectDefaultsRequest(betterproto.Message):
    project: "_pfs__.Project" = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var project : Project
class GetProjectDefaultsResponse (project_defaults_json: str = None)

GetProjectDefaultsResponse(project_defaults_json: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class GetProjectDefaultsResponse(betterproto.Message):
    project_defaults_json: str = betterproto.string_field(1)
    """
    A JSON-encoded ProjectDefaults message; this is the verbatim input passed
    to SetProjectDefaults.
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var project_defaults_json : str

A JSON-encoded ProjectDefaults message; this is the verbatim input passed to SetProjectDefaults.

class SetProjectDefaultsRequest (project: _pfs__.Project = None, regenerate: bool = None, reprocess: bool = None, dry_run: bool = None, project_defaults_json: str = None)

SetProjectDefaultsRequest(project: '_pfs__.Project' = None, regenerate: bool = None, reprocess: bool = None, dry_run: bool = None, project_defaults_json: str = None)

Expand source code
@dataclass(eq=False, repr=False)
class SetProjectDefaultsRequest(betterproto.Message):
    project: "_pfs__.Project" = betterproto.message_field(1)
    regenerate: bool = betterproto.bool_field(2)
    reprocess: bool = betterproto.bool_field(3)
    dry_run: bool = betterproto.bool_field(4)
    project_defaults_json: str = betterproto.string_field(5)
    """
    A JSON-encoded ProjectDefaults message; this will be stored verbatim.
    """

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var project : Project
var regenerate : bool
var reprocess : bool
var dry_run : bool
var project_defaults_json : str

A JSON-encoded ProjectDefaults message; this will be stored verbatim.

class SetProjectDefaultsResponse (affected_pipelines: List[ForwardRef('Pipeline')] = None)

SetProjectDefaultsResponse(affected_pipelines: List[ForwardRef('Pipeline')] = None)

Expand source code
@dataclass(eq=False, repr=False)
class SetProjectDefaultsResponse(betterproto.Message):
    affected_pipelines: List["Pipeline"] = betterproto.message_field(1)

Ancestors

  • betterproto.Message
  • abc.ABC

Class variables

var affected_pipelines : List[Pipeline]
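
ApiStub wraps a plain grpc.Channel and builds the request messages above from keyword-only arguments. A connection sketch; the address and project name are hypothetical, and a production cluster would typically need a secure channel plus auth metadata:

import json

import grpc

from pachyderm_sdk.api import pfs
from pachyderm_sdk.api.pps import ApiStub

channel = grpc.insecure_channel("localhost:30650")  # hypothetical address
stub = ApiStub(channel)

# Scope defaults to a single project rather than the whole cluster.
stub.set_project_defaults(
    project=pfs.Project(name="ml-team"),            # hypothetical project
    project_defaults_json=json.dumps(
        {"create_pipeline_request": {"autoscaling": True}}
    ),
)
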
class ApiStub (channel: grpc.Channel)
Expand source code
class ApiStub:

    def __init__(self, channel: "grpc.Channel"):
        self.__rpc_inspect_job = channel.unary_unary(
            "/pps_v2.API/InspectJob",
            request_serializer=InspectJobRequest.SerializeToString,
            response_deserializer=JobInfo.FromString,
        )
        self.__rpc_inspect_job_set = channel.unary_stream(
            "/pps_v2.API/InspectJobSet",
            request_serializer=InspectJobSetRequest.SerializeToString,
            response_deserializer=JobInfo.FromString,
        )
        self.__rpc_list_job = channel.unary_stream(
            "/pps_v2.API/ListJob",
            request_serializer=ListJobRequest.SerializeToString,
            response_deserializer=JobInfo.FromString,
        )
        self.__rpc_list_job_set = channel.unary_stream(
            "/pps_v2.API/ListJobSet",
            request_serializer=ListJobSetRequest.SerializeToString,
            response_deserializer=JobSetInfo.FromString,
        )
        self.__rpc_subscribe_job = channel.unary_stream(
            "/pps_v2.API/SubscribeJob",
            request_serializer=SubscribeJobRequest.SerializeToString,
            response_deserializer=JobInfo.FromString,
        )
        self.__rpc_delete_job = channel.unary_unary(
            "/pps_v2.API/DeleteJob",
            request_serializer=DeleteJobRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_stop_job = channel.unary_unary(
            "/pps_v2.API/StopJob",
            request_serializer=StopJobRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_inspect_datum = channel.unary_unary(
            "/pps_v2.API/InspectDatum",
            request_serializer=InspectDatumRequest.SerializeToString,
            response_deserializer=DatumInfo.FromString,
        )
        self.__rpc_list_datum = channel.unary_stream(
            "/pps_v2.API/ListDatum",
            request_serializer=ListDatumRequest.SerializeToString,
            response_deserializer=DatumInfo.FromString,
        )
        self.__rpc_create_datum = channel.stream_stream(
            "/pps_v2.API/CreateDatum",
            request_serializer=CreateDatumRequest.SerializeToString,
            response_deserializer=DatumInfo.FromString,
        )
        self.__rpc_restart_datum = channel.unary_unary(
            "/pps_v2.API/RestartDatum",
            request_serializer=RestartDatumRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_rerun_pipeline = channel.unary_unary(
            "/pps_v2.API/RerunPipeline",
            request_serializer=RerunPipelineRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_create_pipeline = channel.unary_unary(
            "/pps_v2.API/CreatePipeline",
            request_serializer=CreatePipelineRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_create_pipeline_v2 = channel.unary_unary(
            "/pps_v2.API/CreatePipelineV2",
            request_serializer=CreatePipelineV2Request.SerializeToString,
            response_deserializer=CreatePipelineV2Response.FromString,
        )
        self.__rpc_inspect_pipeline = channel.unary_unary(
            "/pps_v2.API/InspectPipeline",
            request_serializer=InspectPipelineRequest.SerializeToString,
            response_deserializer=PipelineInfo.FromString,
        )
        self.__rpc_list_pipeline = channel.unary_stream(
            "/pps_v2.API/ListPipeline",
            request_serializer=ListPipelineRequest.SerializeToString,
            response_deserializer=PipelineInfo.FromString,
        )
        self.__rpc_delete_pipeline = channel.unary_unary(
            "/pps_v2.API/DeletePipeline",
            request_serializer=DeletePipelineRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_delete_pipelines = channel.unary_unary(
            "/pps_v2.API/DeletePipelines",
            request_serializer=DeletePipelinesRequest.SerializeToString,
            response_deserializer=DeletePipelinesResponse.FromString,
        )
        self.__rpc_start_pipeline = channel.unary_unary(
            "/pps_v2.API/StartPipeline",
            request_serializer=StartPipelineRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_stop_pipeline = channel.unary_unary(
            "/pps_v2.API/StopPipeline",
            request_serializer=StopPipelineRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_run_pipeline = channel.unary_unary(
            "/pps_v2.API/RunPipeline",
            request_serializer=RunPipelineRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_run_cron = channel.unary_unary(
            "/pps_v2.API/RunCron",
            request_serializer=RunCronRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_check_status = channel.unary_stream(
            "/pps_v2.API/CheckStatus",
            request_serializer=CheckStatusRequest.SerializeToString,
            response_deserializer=CheckStatusResponse.FromString,
        )
        self.__rpc_create_secret = channel.unary_unary(
            "/pps_v2.API/CreateSecret",
            request_serializer=CreateSecretRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_delete_secret = channel.unary_unary(
            "/pps_v2.API/DeleteSecret",
            request_serializer=DeleteSecretRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_list_secret = channel.unary_unary(
            "/pps_v2.API/ListSecret",
            request_serializer=betterproto_lib_google_protobuf.Empty.SerializeToString,
            response_deserializer=SecretInfos.FromString,
        )
        self.__rpc_inspect_secret = channel.unary_unary(
            "/pps_v2.API/InspectSecret",
            request_serializer=InspectSecretRequest.SerializeToString,
            response_deserializer=SecretInfo.FromString,
        )
        self.__rpc_delete_all = channel.unary_unary(
            "/pps_v2.API/DeleteAll",
            request_serializer=betterproto_lib_google_protobuf.Empty.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_get_logs = channel.unary_stream(
            "/pps_v2.API/GetLogs",
            request_serializer=GetLogsRequest.SerializeToString,
            response_deserializer=LogMessage.FromString,
        )
        self.__rpc_activate_auth = channel.unary_unary(
            "/pps_v2.API/ActivateAuth",
            request_serializer=ActivateAuthRequest.SerializeToString,
            response_deserializer=ActivateAuthResponse.FromString,
        )
        self.__rpc_update_job_state = channel.unary_unary(
            "/pps_v2.API/UpdateJobState",
            request_serializer=UpdateJobStateRequest.SerializeToString,
            response_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
        )
        self.__rpc_run_load_test = channel.unary_unary(
            "/pps_v2.API/RunLoadTest",
            request_serializer=RunLoadTestRequest.SerializeToString,
            response_deserializer=RunLoadTestResponse.FromString,
        )
        self.__rpc_run_load_test_default = channel.unary_unary(
            "/pps_v2.API/RunLoadTestDefault",
            request_serializer=betterproto_lib_google_protobuf.Empty.SerializeToString,
            response_deserializer=RunLoadTestResponse.FromString,
        )
        self.__rpc_render_template = channel.unary_unary(
            "/pps_v2.API/RenderTemplate",
            request_serializer=RenderTemplateRequest.SerializeToString,
            response_deserializer=RenderTemplateResponse.FromString,
        )
        self.__rpc_list_task = channel.unary_stream(
            "/pps_v2.API/ListTask",
            request_serializer=_taskapi__.ListTaskRequest.SerializeToString,
            response_deserializer=_taskapi__.TaskInfo.FromString,
        )
        self.__rpc_get_kube_events = channel.unary_stream(
            "/pps_v2.API/GetKubeEvents",
            request_serializer=LokiRequest.SerializeToString,
            response_deserializer=LokiLogMessage.FromString,
        )
        self.__rpc_query_loki = channel.unary_stream(
            "/pps_v2.API/QueryLoki",
            request_serializer=LokiRequest.SerializeToString,
            response_deserializer=LokiLogMessage.FromString,
        )
        self.__rpc_get_cluster_defaults = channel.unary_unary(
            "/pps_v2.API/GetClusterDefaults",
            request_serializer=GetClusterDefaultsRequest.SerializeToString,
            response_deserializer=GetClusterDefaultsResponse.FromString,
        )
        self.__rpc_set_cluster_defaults = channel.unary_unary(
            "/pps_v2.API/SetClusterDefaults",
            request_serializer=SetClusterDefaultsRequest.SerializeToString,
            response_deserializer=SetClusterDefaultsResponse.FromString,
        )
        self.__rpc_get_project_defaults = channel.unary_unary(
            "/pps_v2.API/GetProjectDefaults",
            request_serializer=GetProjectDefaultsRequest.SerializeToString,
            response_deserializer=GetProjectDefaultsResponse.FromString,
        )
        self.__rpc_set_project_defaults = channel.unary_unary(
            "/pps_v2.API/SetProjectDefaults",
            request_serializer=SetProjectDefaultsRequest.SerializeToString,
            response_deserializer=SetProjectDefaultsResponse.FromString,
        )

    def inspect_job(
        self, *, job: "Job" = None, wait: bool = False, details: bool = False
    ) -> "JobInfo":

        request = InspectJobRequest()
        if job is not None:
            request.job = job
        request.wait = wait
        request.details = details

        return self.__rpc_inspect_job(request)

    def inspect_job_set(
        self, *, job_set: "JobSet" = None, wait: bool = False, details: bool = False
    ) -> Iterator["JobInfo"]:

        request = InspectJobSetRequest()
        if job_set is not None:
            request.job_set = job_set
        request.wait = wait
        request.details = details

        for response in self.__rpc_inspect_job_set(request):
            yield response

    def list_job(
        self,
        *,
        projects: Optional[List["_pfs__.Project"]] = None,
        pipeline: "Pipeline" = None,
        input_commit: Optional[List["_pfs__.Commit"]] = None,
        history: int = 0,
        details: bool = False,
        jq_filter: str = "",
        pagination_marker: datetime = None,
        number: int = 0,
        reverse: bool = False
    ) -> Iterator["JobInfo"]:
        projects = projects or []
        input_commit = input_commit or []

        request = ListJobRequest()
        if projects is not None:
            request.projects = projects
        if pipeline is not None:
            request.pipeline = pipeline
        if input_commit is not None:
            request.input_commit = input_commit
        request.history = history
        request.details = details
        request.jq_filter = jq_filter
        if pagination_marker is not None:
            request.pagination_marker = pagination_marker
        request.number = number
        request.reverse = reverse

        for response in self.__rpc_list_job(request):
            yield response

    def list_job_set(
        self,
        *,
        details: bool = False,
        projects: Optional[List["_pfs__.Project"]] = None,
        pagination_marker: datetime = None,
        number: int = 0,
        reverse: bool = False,
        jq_filter: str = ""
    ) -> Iterator["JobSetInfo"]:
        projects = projects or []

        request = ListJobSetRequest()
        request.details = details
        if projects is not None:
            request.projects = projects
        if pagination_marker is not None:
            request.pagination_marker = pagination_marker
        request.number = number
        request.reverse = reverse
        request.jq_filter = jq_filter

        for response in self.__rpc_list_job_set(request):
            yield response

    def subscribe_job(
        self, *, pipeline: "Pipeline" = None, details: bool = False
    ) -> Iterator["JobInfo"]:

        request = SubscribeJobRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        request.details = details

        for response in self.__rpc_subscribe_job(request):
            yield response

    def delete_job(
        self, *, job: "Job" = None
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = DeleteJobRequest()
        if job is not None:
            request.job = job

        return self.__rpc_delete_job(request)

    def stop_job(
        self, *, job: "Job" = None, reason: str = ""
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = StopJobRequest()
        if job is not None:
            request.job = job
        request.reason = reason

        return self.__rpc_stop_job(request)

    def inspect_datum(self, *, datum: "Datum" = None) -> "DatumInfo":

        request = InspectDatumRequest()
        if datum is not None:
            request.datum = datum

        return self.__rpc_inspect_datum(request)

    def list_datum(
        self,
        *,
        job: "Job" = None,
        input: "Input" = None,
        filter: "ListDatumRequestFilter" = None,
        pagination_marker: str = "",
        number: int = 0,
        reverse: bool = False
    ) -> Iterator["DatumInfo"]:

        request = ListDatumRequest()
        if job is not None:
            request.job = job
        if input is not None:
            request.input = input
        if filter is not None:
            request.filter = filter
        request.pagination_marker = pagination_marker
        request.number = number
        request.reverse = reverse

        for response in self.__rpc_list_datum(request):
            yield response

    def create_datum(
        self,
        request_iterator: Union[
            AsyncIterable["CreateDatumRequest"], Iterable["CreateDatumRequest"]
        ],
    ) -> Iterator["DatumInfo"]:

        for response in self.__rpc_create_datum(request_iterator):
            yield response

    def restart_datum(
        self, *, job: "Job" = None, data_filters: Optional[List[str]] = None
    ) -> "betterproto_lib_google_protobuf.Empty":
        data_filters = data_filters or []

        request = RestartDatumRequest()
        if job is not None:
            request.job = job
        request.data_filters = data_filters

        return self.__rpc_restart_datum(request)

    def rerun_pipeline(
        self, *, pipeline: "Pipeline" = None, reprocess: bool = False
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = RerunPipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        request.reprocess = reprocess

        return self.__rpc_rerun_pipeline(request)

    def create_pipeline(
        self,
        *,
        pipeline: "Pipeline" = None,
        tf_job: "TfJob" = None,
        transform: "Transform" = None,
        parallelism_spec: "ParallelismSpec" = None,
        egress: "Egress" = None,
        update: bool = False,
        output_branch: str = "",
        s3_out: bool = False,
        resource_requests: "ResourceSpec" = None,
        resource_limits: "ResourceSpec" = None,
        sidecar_resource_limits: "ResourceSpec" = None,
        input: "Input" = None,
        description: str = "",
        reprocess: bool = False,
        service: "Service" = None,
        spout: "Spout" = None,
        datum_set_spec: "DatumSetSpec" = None,
        datum_timeout: timedelta = None,
        job_timeout: timedelta = None,
        salt: str = "",
        datum_tries: int = 0,
        scheduling_spec: "SchedulingSpec" = None,
        pod_spec: str = "",
        pod_patch: str = "",
        spec_commit: "_pfs__.Commit" = None,
        metadata: "Metadata" = None,
        reprocess_spec: str = "",
        autoscaling: bool = False,
        tolerations: Optional[List["Toleration"]] = None,
        sidecar_resource_requests: "ResourceSpec" = None,
        dry_run: bool = False,
        determined: "Determined" = None,
        maximum_expected_uptime: timedelta = None
    ) -> "betterproto_lib_google_protobuf.Empty":
        tolerations = tolerations or []

        request = CreatePipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        if tf_job is not None:
            request.tf_job = tf_job
        if transform is not None:
            request.transform = transform
        if parallelism_spec is not None:
            request.parallelism_spec = parallelism_spec
        if egress is not None:
            request.egress = egress
        request.update = update
        request.output_branch = output_branch
        request.s3_out = s3_out
        if resource_requests is not None:
            request.resource_requests = resource_requests
        if resource_limits is not None:
            request.resource_limits = resource_limits
        if sidecar_resource_limits is not None:
            request.sidecar_resource_limits = sidecar_resource_limits
        if input is not None:
            request.input = input
        request.description = description
        request.reprocess = reprocess
        if service is not None:
            request.service = service
        if spout is not None:
            request.spout = spout
        if datum_set_spec is not None:
            request.datum_set_spec = datum_set_spec
        if datum_timeout is not None:
            request.datum_timeout = datum_timeout
        if job_timeout is not None:
            request.job_timeout = job_timeout
        request.salt = salt
        request.datum_tries = datum_tries
        if scheduling_spec is not None:
            request.scheduling_spec = scheduling_spec
        request.pod_spec = pod_spec
        request.pod_patch = pod_patch
        if spec_commit is not None:
            request.spec_commit = spec_commit
        if metadata is not None:
            request.metadata = metadata
        request.reprocess_spec = reprocess_spec
        request.autoscaling = autoscaling
        if tolerations is not None:
            request.tolerations = tolerations
        if sidecar_resource_requests is not None:
            request.sidecar_resource_requests = sidecar_resource_requests
        request.dry_run = dry_run
        if determined is not None:
            request.determined = determined
        if maximum_expected_uptime is not None:
            request.maximum_expected_uptime = maximum_expected_uptime

        return self.__rpc_create_pipeline(request)

    def create_pipeline_v2(
        self,
        *,
        create_pipeline_request_json: str = "",
        dry_run: bool = False,
        update: bool = False,
        reprocess: bool = False
    ) -> "CreatePipelineV2Response":

        request = CreatePipelineV2Request()
        request.create_pipeline_request_json = create_pipeline_request_json
        request.dry_run = dry_run
        request.update = update
        request.reprocess = reprocess

        return self.__rpc_create_pipeline_v2(request)

    def inspect_pipeline(
        self, *, pipeline: "Pipeline" = None, details: bool = False
    ) -> "PipelineInfo":

        request = InspectPipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        request.details = details

        return self.__rpc_inspect_pipeline(request)

    def list_pipeline(
        self,
        *,
        pipeline: "Pipeline" = None,
        history: int = 0,
        details: bool = False,
        jq_filter: str = "",
        commit_set: "_pfs__.CommitSet" = None,
        projects: Optional[List["_pfs__.Project"]] = None
    ) -> Iterator["PipelineInfo"]:
        projects = projects or []

        request = ListPipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        request.history = history
        request.details = details
        request.jq_filter = jq_filter
        if commit_set is not None:
            request.commit_set = commit_set
        if projects is not None:
            request.projects = projects

        for response in self.__rpc_list_pipeline(request):
            yield response

    def delete_pipeline(
        self,
        *,
        pipeline: "Pipeline" = None,
        all: bool = False,
        force: bool = False,
        keep_repo: bool = False,
        must_exist: bool = False
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = DeletePipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        request.all = all
        request.force = force
        request.keep_repo = keep_repo
        request.must_exist = must_exist

        return self.__rpc_delete_pipeline(request)

    def delete_pipelines(
        self,
        *,
        projects: Optional[List["_pfs__.Project"]] = None,
        force: bool = False,
        keep_repo: bool = False,
        all: bool = False
    ) -> "DeletePipelinesResponse":
        projects = projects or []

        request = DeletePipelinesRequest()
        if projects is not None:
            request.projects = projects
        request.force = force
        request.keep_repo = keep_repo
        request.all = all

        return self.__rpc_delete_pipelines(request)

    def start_pipeline(
        self, *, pipeline: "Pipeline" = None
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = StartPipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline

        return self.__rpc_start_pipeline(request)

    def stop_pipeline(
        self, *, pipeline: "Pipeline" = None, must_exist: bool = False
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = StopPipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        request.must_exist = must_exist

        return self.__rpc_stop_pipeline(request)

    def run_pipeline(
        self,
        *,
        pipeline: "Pipeline" = None,
        provenance: Optional[List["_pfs__.Commit"]] = None,
        job_id: str = ""
    ) -> "betterproto_lib_google_protobuf.Empty":
        provenance = provenance or []

        request = RunPipelineRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        if provenance is not None:
            request.provenance = provenance
        request.job_id = job_id

        return self.__rpc_run_pipeline(request)

    def run_cron(
        self, *, pipeline: "Pipeline" = None
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = RunCronRequest()
        if pipeline is not None:
            request.pipeline = pipeline

        return self.__rpc_run_cron(request)

    def check_status(
        self, *, all: bool = False, project: "_pfs__.Project" = None
    ) -> Iterator["CheckStatusResponse"]:

        request = CheckStatusRequest()
        request.all = all
        if project is not None:
            request.project = project

        for response in self.__rpc_check_status(request):
            yield response

    def create_secret(
        self, *, file: bytes = b""
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = CreateSecretRequest()
        request.file = file

        return self.__rpc_create_secret(request)

    def delete_secret(
        self, *, secret: "Secret" = None
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = DeleteSecretRequest()
        if secret is not None:
            request.secret = secret

        return self.__rpc_delete_secret(request)

    def list_secret(self) -> "SecretInfos":

        request = betterproto_lib_google_protobuf.Empty()

        return self.__rpc_list_secret(request)

    def inspect_secret(self, *, secret: "Secret" = None) -> "SecretInfo":

        request = InspectSecretRequest()
        if secret is not None:
            request.secret = secret

        return self.__rpc_inspect_secret(request)

    def delete_all(self) -> "betterproto_lib_google_protobuf.Empty":

        request = betterproto_lib_google_protobuf.Empty()

        return self.__rpc_delete_all(request)

    def get_logs(
        self,
        *,
        pipeline: "Pipeline" = None,
        job: "Job" = None,
        data_filters: Optional[List[str]] = None,
        datum: "Datum" = None,
        master: bool = False,
        follow: bool = False,
        tail: int = 0,
        use_loki_backend: bool = False,
        since: timedelta = None
    ) -> Iterator["LogMessage"]:
        data_filters = data_filters or []

        request = GetLogsRequest()
        if pipeline is not None:
            request.pipeline = pipeline
        if job is not None:
            request.job = job
        request.data_filters = data_filters
        if datum is not None:
            request.datum = datum
        request.master = master
        request.follow = follow
        request.tail = tail
        request.use_loki_backend = use_loki_backend
        if since is not None:
            request.since = since

        for response in self.__rpc_get_logs(request):
            yield response

    def activate_auth(self) -> "ActivateAuthResponse":

        request = ActivateAuthRequest()

        return self.__rpc_activate_auth(request)

    def update_job_state(
        self,
        *,
        job: "Job" = None,
        state: "JobState" = None,
        reason: str = "",
        restart: int = 0,
        data_processed: int = 0,
        data_skipped: int = 0,
        data_failed: int = 0,
        data_recovered: int = 0,
        data_total: int = 0,
        stats: "ProcessStats" = None
    ) -> "betterproto_lib_google_protobuf.Empty":

        request = UpdateJobStateRequest()
        if job is not None:
            request.job = job
        request.state = state
        request.reason = reason
        request.restart = restart
        request.data_processed = data_processed
        request.data_skipped = data_skipped
        request.data_failed = data_failed
        request.data_recovered = data_recovered
        request.data_total = data_total
        if stats is not None:
            request.stats = stats

        return self.__rpc_update_job_state(request)

    def run_load_test(
        self,
        *,
        dag_spec: str = "",
        load_spec: str = "",
        seed: int = 0,
        parallelism: int = 0,
        pod_patch: str = "",
        state_id: str = ""
    ) -> "RunLoadTestResponse":

        request = RunLoadTestRequest()
        request.dag_spec = dag_spec
        request.load_spec = load_spec
        request.seed = seed
        request.parallelism = parallelism
        request.pod_patch = pod_patch
        request.state_id = state_id

        return self.__rpc_run_load_test(request)

    def run_load_test_default(self) -> "RunLoadTestResponse":

        request = betterproto_lib_google_protobuf.Empty()

        return self.__rpc_run_load_test_default(request)

    def render_template(
        self, *, template: str = "", args: Dict[str, str] = None
    ) -> "RenderTemplateResponse":

        request = RenderTemplateRequest()
        request.template = template
        request.args = args

        return self.__rpc_render_template(request)

    def list_task(self, *, group: "Group" = None) -> Iterator["_taskapi__.TaskInfo"]:

        request = _taskapi__.ListTaskRequest()
        if group is not None:
            request.group = group

        for response in self.__rpc_list_task(request):
            yield response

    def get_kube_events(
        self, *, since: timedelta = None, query: str = ""
    ) -> Iterator["LokiLogMessage"]:

        request = LokiRequest()
        if since is not None:
            request.since = since
        request.query = query

        for response in self.__rpc_get_kube_events(request):
            yield response

    def query_loki(
        self, *, since: timedelta = None, query: str = ""
    ) -> Iterator["LokiLogMessage"]:

        request = LokiRequest()
        if since is not None:
            request.since = since
        request.query = query

        for response in self.__rpc_query_loki(request):
            yield response

    def get_cluster_defaults(self) -> "GetClusterDefaultsResponse":

        request = GetClusterDefaultsRequest()

        return self.__rpc_get_cluster_defaults(request)

    def set_cluster_defaults(
        self,
        *,
        regenerate: bool = False,
        reprocess: bool = False,
        dry_run: bool = False,
        cluster_defaults_json: str = ""
    ) -> "SetClusterDefaultsResponse":

        request = SetClusterDefaultsRequest()
        request.regenerate = regenerate
        request.reprocess = reprocess
        request.dry_run = dry_run
        request.cluster_defaults_json = cluster_defaults_json

        return self.__rpc_set_cluster_defaults(request)

    def get_project_defaults(
        self, *, project: "_pfs__.Project" = None
    ) -> "GetProjectDefaultsResponse":

        request = GetProjectDefaultsRequest()
        if project is not None:
            request.project = project

        return self.__rpc_get_project_defaults(request)

    def set_project_defaults(
        self,
        *,
        project: "_pfs__.Project" = None,
        regenerate: bool = False,
        reprocess: bool = False,
        dry_run: bool = False,
        project_defaults_json: str = ""
    ) -> "SetProjectDefaultsResponse":

        request = SetProjectDefaultsRequest()
        if project is not None:
            request.project = project
        request.regenerate = regenerate
        request.reprocess = reprocess
        request.dry_run = dry_run
        request.project_defaults_json = project_defaults_json

        return self.__rpc_set_project_defaults(request)

Methods
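
The per-method entries below pair each signature with a short, hedged usage sketch. The sketches are not part of the generated file: they assume the stub class in this module is named ApiStub, that it is constructed from a plain grpc.Channel (as betterproto sync stubs are), and that pachd is reachable at a placeholder address. Later sketches reuse the `pps` handle created here; all project, pipeline, and repo names are placeholders.

import grpc

from pachyderm_sdk.api.pfs import Project
from pachyderm_sdk.api.pps import ApiStub, Pipeline

# Placeholder endpoint; substitute your pachd address and credentials.
channel = grpc.insecure_channel("localhost:30650")
pps = ApiStub(channel)

# Inspect a single pipeline (project and pipeline names are placeholders).
info = pps.inspect_pipeline(
    pipeline=Pipeline(project=Project(name="default"), name="edges"),
    details=True,
)
print(info.state)
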

def inspect_job(self, *, job: Job = None, wait: bool = False, details: bool = False) ‑> JobInfo
def inspect_job_set(self, *, job_set: JobSet = None, wait: bool = False, details: bool = False) ‑> Iterator[JobInfo]
def list_job(self, *, projects: Optional[List[ForwardRef('_pfs__.Project')]] = None, pipeline: Pipeline = None, input_commit: Optional[List[ForwardRef('_pfs__.Commit')]] = None, history: int = 0, details: bool = False, jq_filter: str = '', pagination_marker: datetime.datetime = None, number: int = 0, reverse: bool = False) ‑> Iterator[JobInfo]
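
A sketch of listing recent jobs for one pipeline, reusing the `pps` stub from the setup sketch; `number` caps the result count and `pagination_marker` resumes from a previous page (semantics inferred from the signature, so verify against your server version).

from pachyderm_sdk.api.pfs import Project
from pachyderm_sdk.api.pps import Pipeline

edges = Pipeline(project=Project(name="default"), name="edges")
# Stream at most 50 JobInfo messages; details=True also loads heavier fields.
for job_info in pps.list_job(pipeline=edges, number=50):
    print(job_info.job.id, job_info.state)
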
def list_job_set(self, *, details: bool = False, projects: Optional[List[ForwardRef('_pfs__.Project')]] = None, pagination_marker: datetime.datetime = None, number: int = 0, reverse: bool = False, jq_filter: str = '') ‑> Iterator[JobSetInfo]
def subscribe_job(self, *, pipeline: Pipeline = None, details: bool = False) ‑> Iterator[JobInfo]
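
subscribe_job keeps a server stream open and yields a JobInfo whenever the pipeline starts a new job; the sketch below (same `pps` handle) consumes one event and exits.

from pachyderm_sdk.api.pfs import Project
from pachyderm_sdk.api.pps import Pipeline

edges = Pipeline(project=Project(name="default"), name="edges")
for job_info in pps.subscribe_job(pipeline=edges, details=False):
    print("new job:", job_info.job.id)
    break  # the stream is otherwise open-ended
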
def delete_job(self, *, job: Job = None) ‑> betterproto.lib.google.protobuf.Empty
def stop_job(self, *, job: Job = None, reason: str = '') ‑> betterproto.lib.google.protobuf.Empty
def inspect_datum(self, *, datum: Datum = None) ‑> DatumInfo
def list_datum(self, *, job: Job = None, input: Input = None, filter: ListDatumRequestFilter = None, pagination_marker: str = '', number: int = 0, reverse: bool = False) ‑> Iterator[DatumInfo]
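
Datums can be listed for a concrete job or, by passing input instead of job, for a hypothetical input spec without running anything; reading the two fields as mutually exclusive is an inference from the request shape. A sketch with the same `pps` handle:

from pachyderm_sdk.api.pfs import Project
from pachyderm_sdk.api.pps import Job, Pipeline

job = Job(
    pipeline=Pipeline(project=Project(name="default"), name="edges"),
    id="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",  # placeholder 32-char job id
)
for datum_info in pps.list_datum(job=job, number=100):
    print(datum_info.datum.id, datum_info.state)
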
def create_datum(self, request_iterator: Union[AsyncIterable[ForwardRef('CreateDatumRequest')], Iterable[ForwardRef('CreateDatumRequest')]]) ‑> Iterator[DatumInfo]
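
create_datum is bidirectional: the caller passes an iterable (or async iterable) of CreateDatumRequest messages and consumes DatumInfo responses. The request's fields are defined elsewhere in this module, so the sketch below only shows the calling shape, with an empty placeholder message.

from pachyderm_sdk.api.pps import CreateDatumRequest

def requests():
    # Populate the CreateDatumRequest fields required by your server
    # version; the generator shape is the point here, not the contents.
    yield CreateDatumRequest()

for datum_info in pps.create_datum(requests()):
    print(datum_info.datum.id)
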
def restart_datum(self, *, job: Job = None, data_filters: Optional[List[str]] = None) ‑> betterproto.lib.google.protobuf.Empty
def rerun_pipeline(self, *, pipeline: Pipeline = None, reprocess: bool = False) ‑> betterproto.lib.google.protobuf.Empty
def create_pipeline(self, *, pipeline: Pipeline = None, tf_job: TfJob = None, transform: Transform = None, parallelism_spec: ParallelismSpec = None, egress: Egress = None, update: bool = False, output_branch: str = '', s3_out: bool = False, resource_requests: ResourceSpec = None, resource_limits: ResourceSpec = None, sidecar_resource_limits: ResourceSpec = None, input: Input = None, description: str = '', reprocess: bool = False, service: Service = None, spout: Spout = None, datum_set_spec: DatumSetSpec = None, datum_timeout: datetime.timedelta = None, job_timeout: datetime.timedelta = None, salt: str = '', datum_tries: int = 0, scheduling_spec: SchedulingSpec = None, pod_spec: str = '', pod_patch: str = '', spec_commit: _pfs__.Commit = None, metadata: Metadata = None, reprocess_spec: str = '', autoscaling: bool = False, tolerations: Optional[List[ForwardRef('Toleration')]] = None, sidecar_resource_requests: ResourceSpec = None, dry_run: bool = False, determined: Determined = None, maximum_expected_uptime: datetime.timedelta = None) ‑> betterproto.lib.google.protobuf.Empty
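
A minimal transform pipeline, sketched with this module's Transform, Input, and PfsInput messages; the image, command, repo, and glob are placeholders, and the PFS input field names are assumptions to check against the message definitions above.

from pachyderm_sdk.api.pfs import Project
from pachyderm_sdk.api.pps import Input, PfsInput, Pipeline, Transform

pps.create_pipeline(
    pipeline=Pipeline(project=Project(name="default"), name="edges"),
    transform=Transform(
        image="python:3.11-slim",          # placeholder image
        cmd=["python", "/app/edges.py"],   # placeholder entrypoint
    ),
    input=Input(
        pfs=PfsInput(project="default", repo="images", glob="/*")
    ),
)
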
def create_pipeline_v2(self, *, create_pipeline_request_json: str = '', dry_run: bool = False, update: bool = False, reprocess: bool = False) ‑> CreatePipelineV2Response
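
The v2 variant accepts the whole CreatePipelineRequest as a JSON document, which suits specs kept in files; dry_run=True asks the server to validate and echo the effective spec without applying it. A sketch (JSON key casing follows proto JSON conventions, an assumption worth verifying):

import json

spec = {
    "pipeline": {"project": {"name": "default"}, "name": "edges"},
    "transform": {"image": "python:3.11-slim",
                  "cmd": ["python", "/app/edges.py"]},
    "input": {"pfs": {"project": "default", "repo": "images", "glob": "/*"}},
}
resp = pps.create_pipeline_v2(
    create_pipeline_request_json=json.dumps(spec),
    dry_run=True,
)
print(resp)
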
def inspect_pipeline(self, *, pipeline: Pipeline = None, details: bool = False) ‑> PipelineInfo
def list_pipeline(self, *, pipeline: Pipeline = None, history: int = 0, details: bool = False, jq_filter: str = '', commit_set: _pfs__.CommitSet = None, projects: Optional[List[ForwardRef('_pfs__.Project')]] = None) ‑> Iterator[PipelineInfo]
def delete_pipeline(self, *, pipeline: Pipeline = None, all: bool = False, force: bool = False, keep_repo: bool = False, must_exist: bool = False) ‑> betterproto.lib.google.protobuf.Empty
def delete_pipelines(self, *, projects: Optional[List[ForwardRef('_pfs__.Project')]] = None, force: bool = False, keep_repo: bool = False, all: bool = False) ‑> DeletePipelinesResponse
def start_pipeline(self, *, pipeline: Pipeline = None) ‑> betterproto.lib.google.protobuf.Empty
def stop_pipeline(self, *, pipeline: Pipeline = None, must_exist: bool = False) ‑> betterproto.lib.google.protobuf.Empty
def run_pipeline(self, *, pipeline: Pipeline = None, provenance: Optional[List[ForwardRef('_pfs__.Commit')]] = None, job_id: str = '') ‑> betterproto.lib.google.protobuf.Empty
def run_cron(self, *, pipeline: Pipeline = None) ‑> betterproto.lib.google.protobuf.Empty
def check_status(self, *, all: bool = False, project: _pfs__.Project = None) ‑> Iterator[CheckStatusResponse]
def create_secret(self, *, file: bytes = b'') ‑> betterproto.lib.google.protobuf.Empty
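
create_secret takes the raw bytes of a Kubernetes Secret manifest, mirroring `pachctl create secret -f`; treating the payload as serialized JSON is an assumption, so verify the expected format for your server. A sketch:

import json

manifest = {
    "apiVersion": "v1",
    "kind": "Secret",
    "metadata": {"name": "my-secret"},            # placeholder name
    "stringData": {"API_TOKEN": "placeholder"},   # placeholder payload
}
pps.create_secret(file=json.dumps(manifest).encode("utf-8"))
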
def delete_secret(self, *, secret: Secret = None) ‑> betterproto.lib.google.protobuf.Empty
def list_secret(self) ‑> SecretInfos
def inspect_secret(self, *, secret: Secret = None) ‑> SecretInfo
def delete_all(self) ‑> betterproto.lib.google.protobuf.Empty
def get_logs(self, *, pipeline: Pipeline = None, job: Job = None, data_filters: Optional[List[str]] = None, datum: Datum = None, master: bool = False, follow: bool = False, tail: int = 0, use_loki_backend: bool = False, since: datetime.timedelta = None) ‑> Iterator[LogMessage]
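
Tailing pipeline logs with the same `pps` handle; tail bounds the initial backlog, since bounds the lookback window, and follow=True would keep the stream open.

from datetime import timedelta

from pachyderm_sdk.api.pfs import Project
from pachyderm_sdk.api.pps import Pipeline

edges = Pipeline(project=Project(name="default"), name="edges")
for msg in pps.get_logs(pipeline=edges, tail=100, since=timedelta(hours=1)):
    print(msg.message)
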
def activate_auth(self) ‑> ActivateAuthResponse
def update_job_state(self, *, job: Job = None, state: JobState = None, reason: str = '', restart: int = 0, data_processed: int = 0, data_skipped: int = 0, data_failed: int = 0, data_recovered: int = 0, data_total: int = 0, stats: ProcessStats = None) ‑> betterproto.lib.google.protobuf.Empty
def run_load_test(self, *, dag_spec: str = '', load_spec: str = '', seed: int = 0, parallelism: int = 0, pod_patch: str = '', state_id: str = '') ‑> RunLoadTestResponse
def run_load_test_default(self) ‑> RunLoadTestResponse
def render_template(self, *, template: str = '', args: Dict[str, str] = None) ‑> RenderTemplateResponse
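
render_template expands a pipeline-spec template server-side with string arguments; the jsonnet-style template below is a placeholder showing the calling shape, not a validated spec.

resp = pps.render_template(
    template='function(suffix) {pipeline: {name: "edges-" + suffix}}',
    args={"suffix": "test"},
)
print(resp)
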
def list_task(self, *, group: Group = None) ‑> Iterator[_taskapi__.TaskInfo]
def get_kube_events(self, *, since: datetime.timedelta = None, query: str = '') ‑> Iterator[LokiLogMessage]
def query_loki(self, *, since: datetime.timedelta = None, query: str = '') ‑> Iterator[LokiLogMessage]
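
query_loki streams entries matched by a LogQL selector when the deployment logs to Loki; the label below is a guess at a typical deployment's labeling, so adjust it for your cluster.

from datetime import timedelta

for entry in pps.query_loki(
    query='{app="pipeline-edges"}',   # placeholder LogQL selector
    since=timedelta(minutes=30),
):
    print(entry.message)
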
def get_cluster_defaults(self) ‑> GetClusterDefaultsResponse
def set_cluster_defaults(self, *, regenerate: bool = False, reprocess: bool = False, dry_run: bool = False, cluster_defaults_json: str = '') ‑> SetClusterDefaultsResponse
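
Cluster defaults are a JSON document merged under every CreatePipelineRequest; dry_run previews the effect, and regenerate/reprocess control whether existing pipelines are rebuilt. A sketch (the wrapper key and its casing are assumptions):

import json

defaults = {"createPipelineRequest": {"datumTries": 3}}  # assumed key casing
resp = pps.set_cluster_defaults(
    cluster_defaults_json=json.dumps(defaults),
    regenerate=True,  # re-materialize pipeline specs against the new defaults
    dry_run=True,     # preview only; drop this to apply
)
print(resp)
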
def get_project_defaults(self, *, project: _pfs__.Project = None) ‑> GetProjectDefaultsResponse
def set_project_defaults(self, *, project: _pfs__.Project = None, regenerate: bool = False, reprocess: bool = False, dry_run: bool = False, project_defaults_json: str = '') ‑> SetProjectDefaultsResponse
class ApiBase
class ApiBase:

    def inspect_job(
        self, job: "Job", wait: bool, details: bool, context: "grpc.ServicerContext"
    ) -> "JobInfo":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def inspect_job_set(
        self,
        job_set: "JobSet",
        wait: bool,
        details: bool,
        context: "grpc.ServicerContext",
    ) -> Iterator["JobInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def list_job(
        self,
        projects: Optional[List["_pfs__.Project"]],
        pipeline: "Pipeline",
        input_commit: Optional[List["_pfs__.Commit"]],
        history: int,
        details: bool,
        jq_filter: str,
        pagination_marker: datetime,
        number: int,
        reverse: bool,
        context: "grpc.ServicerContext",
    ) -> Iterator["JobInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def list_job_set(
        self,
        details: bool,
        projects: Optional[List["_pfs__.Project"]],
        pagination_marker: datetime,
        number: int,
        reverse: bool,
        jq_filter: str,
        context: "grpc.ServicerContext",
    ) -> Iterator["JobSetInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def subscribe_job(
        self, pipeline: "Pipeline", details: bool, context: "grpc.ServicerContext"
    ) -> Iterator["JobInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def delete_job(
        self, job: "Job", context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def stop_job(
        self, job: "Job", reason: str, context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def inspect_datum(
        self, datum: "Datum", context: "grpc.ServicerContext"
    ) -> "DatumInfo":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def list_datum(
        self,
        job: "Job",
        input: "Input",
        filter: "ListDatumRequestFilter",
        pagination_marker: str,
        number: int,
        reverse: bool,
        context: "grpc.ServicerContext",
    ) -> Iterator["DatumInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def create_datum(
        self,
        request_iterator: Iterator["CreateDatumRequest"],
        context: "grpc.ServicerContext",
    ) -> Iterator["DatumInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def restart_datum(
        self,
        job: "Job",
        data_filters: Optional[List[str]],
        context: "grpc.ServicerContext",
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def rerun_pipeline(
        self, pipeline: "Pipeline", reprocess: bool, context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def create_pipeline(
        self,
        pipeline: "Pipeline",
        tf_job: "TfJob",
        transform: "Transform",
        parallelism_spec: "ParallelismSpec",
        egress: "Egress",
        update: bool,
        output_branch: str,
        s3_out: bool,
        resource_requests: "ResourceSpec",
        resource_limits: "ResourceSpec",
        sidecar_resource_limits: "ResourceSpec",
        input: "Input",
        description: str,
        reprocess: bool,
        service: "Service",
        spout: "Spout",
        datum_set_spec: "DatumSetSpec",
        datum_timeout: timedelta,
        job_timeout: timedelta,
        salt: str,
        datum_tries: int,
        scheduling_spec: "SchedulingSpec",
        pod_spec: str,
        pod_patch: str,
        spec_commit: "_pfs__.Commit",
        metadata: "Metadata",
        reprocess_spec: str,
        autoscaling: bool,
        tolerations: Optional[List["Toleration"]],
        sidecar_resource_requests: "ResourceSpec",
        dry_run: bool,
        determined: "Determined",
        maximum_expected_uptime: timedelta,
        context: "grpc.ServicerContext",
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def create_pipeline_v2(
        self,
        create_pipeline_request_json: str,
        dry_run: bool,
        update: bool,
        reprocess: bool,
        context: "grpc.ServicerContext",
    ) -> "CreatePipelineV2Response":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def inspect_pipeline(
        self, pipeline: "Pipeline", details: bool, context: "grpc.ServicerContext"
    ) -> "PipelineInfo":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def list_pipeline(
        self,
        pipeline: "Pipeline",
        history: int,
        details: bool,
        jq_filter: str,
        commit_set: "_pfs__.CommitSet",
        projects: Optional[List["_pfs__.Project"]],
        context: "grpc.ServicerContext",
    ) -> Iterator["PipelineInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def delete_pipeline(
        self,
        pipeline: "Pipeline",
        all: bool,
        force: bool,
        keep_repo: bool,
        must_exist: bool,
        context: "grpc.ServicerContext",
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def delete_pipelines(
        self,
        projects: Optional[List["_pfs__.Project"]],
        force: bool,
        keep_repo: bool,
        all: bool,
        context: "grpc.ServicerContext",
    ) -> "DeletePipelinesResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def start_pipeline(
        self, pipeline: "Pipeline", context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def stop_pipeline(
        self, pipeline: "Pipeline", must_exist: bool, context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def run_pipeline(
        self,
        pipeline: "Pipeline",
        provenance: Optional[List["_pfs__.Commit"]],
        job_id: str,
        context: "grpc.ServicerContext",
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def run_cron(
        self, pipeline: "Pipeline", context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def check_status(
        self, all: bool, project: "_pfs__.Project", context: "grpc.ServicerContext"
    ) -> Iterator["CheckStatusResponse"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def create_secret(
        self, file: bytes, context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def delete_secret(
        self, secret: "Secret", context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def list_secret(self, context: "grpc.ServicerContext") -> "SecretInfos":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def inspect_secret(
        self, secret: "Secret", context: "grpc.ServicerContext"
    ) -> "SecretInfo":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def delete_all(
        self, context: "grpc.ServicerContext"
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def get_logs(
        self,
        pipeline: "Pipeline",
        job: "Job",
        data_filters: Optional[List[str]],
        datum: "Datum",
        master: bool,
        follow: bool,
        tail: int,
        use_loki_backend: bool,
        since: timedelta,
        context: "grpc.ServicerContext",
    ) -> Iterator["LogMessage"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def activate_auth(self, context: "grpc.ServicerContext") -> "ActivateAuthResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def update_job_state(
        self,
        job: "Job",
        state: "JobState",
        reason: str,
        restart: int,
        data_processed: int,
        data_skipped: int,
        data_failed: int,
        data_recovered: int,
        data_total: int,
        stats: "ProcessStats",
        context: "grpc.ServicerContext",
    ) -> "betterproto_lib_google_protobuf.Empty":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def run_load_test(
        self,
        dag_spec: str,
        load_spec: str,
        seed: int,
        parallelism: int,
        pod_patch: str,
        state_id: str,
        context: "grpc.ServicerContext",
    ) -> "RunLoadTestResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def run_load_test_default(
        self, context: "grpc.ServicerContext"
    ) -> "RunLoadTestResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def render_template(
        self, template: str, args: Dict[str, str], context: "grpc.ServicerContext"
    ) -> "RenderTemplateResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def list_task(
        self, group: "Group", context: "grpc.ServicerContext"
    ) -> Iterator["_taskapi__.TaskInfo"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def get_kube_events(
        self, since: timedelta, query: str, context: "grpc.ServicerContext"
    ) -> Iterator["LokiLogMessage"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def query_loki(
        self, since: timedelta, query: str, context: "grpc.ServicerContext"
    ) -> Iterator["LokiLogMessage"]:
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def get_cluster_defaults(
        self, context: "grpc.ServicerContext"
    ) -> "GetClusterDefaultsResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def set_cluster_defaults(
        self,
        regenerate: bool,
        reprocess: bool,
        dry_run: bool,
        cluster_defaults_json: str,
        context: "grpc.ServicerContext",
    ) -> "SetClusterDefaultsResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def get_project_defaults(
        self, project: "_pfs__.Project", context: "grpc.ServicerContext"
    ) -> "GetProjectDefaultsResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def set_project_defaults(
        self,
        project: "_pfs__.Project",
        regenerate: bool,
        reprocess: bool,
        dry_run: bool,
        project_defaults_json: str,
        context: "grpc.ServicerContext",
    ) -> "SetProjectDefaultsResponse":
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    __proto_path__ = "pps_v2.API"

    @property
    def __rpc_methods__(self):
        return {
            "InspectJob": grpc.unary_unary_rpc_method_handler(
                self.inspect_job,
                request_deserializer=InspectJobRequest.FromString,
                response_serializer=InspectJobRequest.SerializeToString,
            ),
            "InspectJobSet": grpc.unary_stream_rpc_method_handler(
                self.inspect_job_set,
                request_deserializer=InspectJobSetRequest.FromString,
                response_serializer=InspectJobSetRequest.SerializeToString,
            ),
            "ListJob": grpc.unary_stream_rpc_method_handler(
                self.list_job,
                request_deserializer=ListJobRequest.FromString,
                response_serializer=ListJobRequest.SerializeToString,
            ),
            "ListJobSet": grpc.unary_stream_rpc_method_handler(
                self.list_job_set,
                request_deserializer=ListJobSetRequest.FromString,
                response_serializer=ListJobSetRequest.SerializeToString,
            ),
            "SubscribeJob": grpc.unary_stream_rpc_method_handler(
                self.subscribe_job,
                request_deserializer=SubscribeJobRequest.FromString,
                response_serializer=SubscribeJobRequest.SerializeToString,
            ),
            "DeleteJob": grpc.unary_unary_rpc_method_handler(
                self.delete_job,
                request_deserializer=DeleteJobRequest.FromString,
                response_serializer=DeleteJobRequest.SerializeToString,
            ),
            "StopJob": grpc.unary_unary_rpc_method_handler(
                self.stop_job,
                request_deserializer=StopJobRequest.FromString,
                response_serializer=StopJobRequest.SerializeToString,
            ),
            "InspectDatum": grpc.unary_unary_rpc_method_handler(
                self.inspect_datum,
                request_deserializer=InspectDatumRequest.FromString,
                response_serializer=InspectDatumRequest.SerializeToString,
            ),
            "ListDatum": grpc.unary_stream_rpc_method_handler(
                self.list_datum,
                request_deserializer=ListDatumRequest.FromString,
                response_serializer=ListDatumRequest.SerializeToString,
            ),
            "CreateDatum": grpc.stream_stream_rpc_method_handler(
                self.create_datum,
                request_deserializer=CreateDatumRequest.FromString,
                response_serializer=CreateDatumRequest.SerializeToString,
            ),
            "RestartDatum": grpc.unary_unary_rpc_method_handler(
                self.restart_datum,
                request_deserializer=RestartDatumRequest.FromString,
                response_serializer=RestartDatumRequest.SerializeToString,
            ),
            "RerunPipeline": grpc.unary_unary_rpc_method_handler(
                self.rerun_pipeline,
                request_deserializer=RerunPipelineRequest.FromString,
                response_serializer=RerunPipelineRequest.SerializeToString,
            ),
            "CreatePipeline": grpc.unary_unary_rpc_method_handler(
                self.create_pipeline,
                request_deserializer=CreatePipelineRequest.FromString,
                response_serializer=CreatePipelineRequest.SerializeToString,
            ),
            "CreatePipelineV2": grpc.unary_unary_rpc_method_handler(
                self.create_pipeline_v2,
                request_deserializer=CreatePipelineV2Request.FromString,
                response_serializer=CreatePipelineV2Request.SerializeToString,
            ),
            "InspectPipeline": grpc.unary_unary_rpc_method_handler(
                self.inspect_pipeline,
                request_deserializer=InspectPipelineRequest.FromString,
                response_serializer=InspectPipelineRequest.SerializeToString,
            ),
            "ListPipeline": grpc.unary_stream_rpc_method_handler(
                self.list_pipeline,
                request_deserializer=ListPipelineRequest.FromString,
                response_serializer=ListPipelineRequest.SerializeToString,
            ),
            "DeletePipeline": grpc.unary_unary_rpc_method_handler(
                self.delete_pipeline,
                request_deserializer=DeletePipelineRequest.FromString,
                response_serializer=DeletePipelineRequest.SerializeToString,
            ),
            "DeletePipelines": grpc.unary_unary_rpc_method_handler(
                self.delete_pipelines,
                request_deserializer=DeletePipelinesRequest.FromString,
                response_serializer=DeletePipelinesRequest.SerializeToString,
            ),
            "StartPipeline": grpc.unary_unary_rpc_method_handler(
                self.start_pipeline,
                request_deserializer=StartPipelineRequest.FromString,
                response_serializer=StartPipelineRequest.SerializeToString,
            ),
            "StopPipeline": grpc.unary_unary_rpc_method_handler(
                self.stop_pipeline,
                request_deserializer=StopPipelineRequest.FromString,
                response_serializer=StopPipelineRequest.SerializeToString,
            ),
            "RunPipeline": grpc.unary_unary_rpc_method_handler(
                self.run_pipeline,
                request_deserializer=RunPipelineRequest.FromString,
                response_serializer=RunPipelineRequest.SerializeToString,
            ),
            "RunCron": grpc.unary_unary_rpc_method_handler(
                self.run_cron,
                request_deserializer=RunCronRequest.FromString,
                response_serializer=RunCronRequest.SerializeToString,
            ),
            "CheckStatus": grpc.unary_stream_rpc_method_handler(
                self.check_status,
                request_deserializer=CheckStatusRequest.FromString,
                response_serializer=CheckStatusRequest.SerializeToString,
            ),
            "CreateSecret": grpc.unary_unary_rpc_method_handler(
                self.create_secret,
                request_deserializer=CreateSecretRequest.FromString,
                response_serializer=CreateSecretRequest.SerializeToString,
            ),
            "DeleteSecret": grpc.unary_unary_rpc_method_handler(
                self.delete_secret,
                request_deserializer=DeleteSecretRequest.FromString,
                response_serializer=DeleteSecretRequest.SerializeToString,
            ),
            "ListSecret": grpc.unary_unary_rpc_method_handler(
                self.list_secret,
                request_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
                response_serializer=betterproto_lib_google_protobuf.Empty.SerializeToString,
            ),
            "InspectSecret": grpc.unary_unary_rpc_method_handler(
                self.inspect_secret,
                request_deserializer=InspectSecretRequest.FromString,
                response_serializer=InspectSecretRequest.SerializeToString,
            ),
            "DeleteAll": grpc.unary_unary_rpc_method_handler(
                self.delete_all,
                request_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
                response_serializer=betterproto_lib_google_protobuf.Empty.SerializeToString,
            ),
            "GetLogs": grpc.unary_stream_rpc_method_handler(
                self.get_logs,
                request_deserializer=GetLogsRequest.FromString,
                response_serializer=GetLogsRequest.SerializeToString,
            ),
            "ActivateAuth": grpc.unary_unary_rpc_method_handler(
                self.activate_auth,
                request_deserializer=ActivateAuthRequest.FromString,
                response_serializer=ActivateAuthRequest.SerializeToString,
            ),
            "UpdateJobState": grpc.unary_unary_rpc_method_handler(
                self.update_job_state,
                request_deserializer=UpdateJobStateRequest.FromString,
                response_serializer=UpdateJobStateRequest.SerializeToString,
            ),
            "RunLoadTest": grpc.unary_unary_rpc_method_handler(
                self.run_load_test,
                request_deserializer=RunLoadTestRequest.FromString,
                response_serializer=RunLoadTestRequest.SerializeToString,
            ),
            "RunLoadTestDefault": grpc.unary_unary_rpc_method_handler(
                self.run_load_test_default,
                request_deserializer=betterproto_lib_google_protobuf.Empty.FromString,
                response_serializer=betterproto_lib_google_protobuf.Empty.SerializeToString,
            ),
            "RenderTemplate": grpc.unary_unary_rpc_method_handler(
                self.render_template,
                request_deserializer=RenderTemplateRequest.FromString,
                response_serializer=RenderTemplateRequest.SerializeToString,
            ),
            "ListTask": grpc.unary_stream_rpc_method_handler(
                self.list_task,
                request_deserializer=_taskapi__.ListTaskRequest.FromString,
                response_serializer=_taskapi__.ListTaskRequest.SerializeToString,
            ),
            "GetKubeEvents": grpc.unary_stream_rpc_method_handler(
                self.get_kube_events,
                request_deserializer=LokiRequest.FromString,
                response_serializer=LokiRequest.SerializeToString,
            ),
            "QueryLoki": grpc.unary_stream_rpc_method_handler(
                self.query_loki,
                request_deserializer=LokiRequest.FromString,
                response_serializer=LokiRequest.SerializeToString,
            ),
            "GetClusterDefaults": grpc.unary_unary_rpc_method_handler(
                self.get_cluster_defaults,
                request_deserializer=GetClusterDefaultsRequest.FromString,
                response_serializer=GetClusterDefaultsRequest.SerializeToString,
            ),
            "SetClusterDefaults": grpc.unary_unary_rpc_method_handler(
                self.set_cluster_defaults,
                request_deserializer=SetClusterDefaultsRequest.FromString,
                response_serializer=SetClusterDefaultsRequest.SerializeToString,
            ),
            "GetProjectDefaults": grpc.unary_unary_rpc_method_handler(
                self.get_project_defaults,
                request_deserializer=GetProjectDefaultsRequest.FromString,
                response_serializer=GetProjectDefaultsRequest.SerializeToString,
            ),
            "SetProjectDefaults": grpc.unary_unary_rpc_method_handler(
                self.set_project_defaults,
                request_deserializer=SetProjectDefaultsRequest.FromString,
                response_serializer=SetProjectDefaultsRequest.SerializeToString,
            ),
        }
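
ApiBase is the servicer skeleton: every method answers UNIMPLEMENTED until a subclass overrides it with the matching signature, and __proto_path__ plus __rpc_methods__ exist to feed grpc's generic-handler registration. A hedged wiring sketch; whether the generated handlers adapt the wire request into the unpacked parameters shown above is internal to this module, so treat the override and registration as illustrative rather than a tested server.

from concurrent import futures

import betterproto.lib.google.protobuf as pb
import grpc

from pachyderm_sdk.api.pps import ApiBase

class MyPpsServicer(ApiBase):
    def delete_all(self, context: "grpc.ServicerContext") -> pb.Empty:
        # Override with the base signature; return Empty instead of
        # raising NotImplementedError.
        return pb.Empty()

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
servicer = MyPpsServicer()
server.add_generic_rpc_handlers(
    (grpc.method_handlers_generic_handler(
        servicer.__proto_path__, servicer.__rpc_methods__),)
)
server.add_insecure_port("[::]:50051")  # placeholder bind address
server.start()
server.wait_for_termination()
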

Methods

def inspect_job(self, job: Job, wait: bool, details: bool, context: grpc.ServicerContext) ‑> JobInfo
def inspect_job_set(self, job_set: JobSet, wait: bool, details: bool, context: grpc.ServicerContext) ‑> Iterator[JobInfo]
def list_job(self, projects: Optional[List[ForwardRef('_pfs__.Project')]], pipeline: Pipeline, input_commit: Optional[List[ForwardRef('_pfs__.Commit')]], history: int, details: bool, jq_filter: str, pagination_marker: datetime.datetime, number: int, reverse: bool, context: grpc.ServicerContext) ‑> Iterator[JobInfo]
Expand source code
def list_job(
    self,
    projects: Optional[List["_pfs__.Project"]],
    pipeline: "Pipeline",
    input_commit: Optional[List["_pfs__.Commit"]],
    history: int,
    details: bool,
    jq_filter: str,
    pagination_marker: datetime,
    number: int,
    reverse: bool,
    context: "grpc.ServicerContext",
) -> Iterator["JobInfo"]:
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
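
list_job is registered with a unary_stream handler, so an override must return an iterator of JobInfo messages; a generator is the idiomatic way to do that. A hedged sketch (ApiBase and the yielded field values are assumptions):

class MyPpsApi(ApiBase):  # ApiBase: assumed base-class name
    def list_job(
        self,
        projects: Optional[List["_pfs__.Project"]],
        pipeline: "Pipeline",
        input_commit: Optional[List["_pfs__.Commit"]],
        history: int,
        details: bool,
        jq_filter: str,
        pagination_marker: datetime,
        number: int,
        reverse: bool,
        context: "grpc.ServicerContext",
    ) -> Iterator["JobInfo"]:
        # Server-streaming: yield one JobInfo per job found.
        for _ in range(number or 1):
            yield JobInfo(state=JobState.JOB_CREATED)
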
def list_job_set(self, details: bool, projects: Optional[List[ForwardRef('_pfs__.Project')]], pagination_marker: datetime.datetime, number: int, reverse: bool, jq_filter: str, context: grpc.ServicerContext) ‑> Iterator[JobSetInfo]
def list_job_set(
    self,
    details: bool,
    projects: Optional[List["_pfs__.Project"]],
    pagination_marker: datetime,
    number: int,
    reverse: bool,
    jq_filter: str,
    context: "grpc.ServicerContext",
) -> Iterator["JobSetInfo"]:
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def subscribe_job(self, pipeline: Pipeline, details: bool, context: grpc.ServicerContext) ‑> Iterator[JobInfo]
def subscribe_job(
    self, pipeline: "Pipeline", details: bool, context: "grpc.ServicerContext"
) -> Iterator["JobInfo"]:
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def delete_job(self, job: Job, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def delete_job(
    self, job: "Job", context: "grpc.ServicerContext"
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def stop_job(self, job: Job, reason: str, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def stop_job(
    self, job: "Job", reason: str, context: "grpc.ServicerContext"
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def inspect_datum(self, datum: Datum, context: grpc.ServicerContext) ‑> DatumInfo
def inspect_datum(
    self, datum: "Datum", context: "grpc.ServicerContext"
) -> "DatumInfo":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def list_datum(self, job: Job, input: Input, filter: ListDatumRequestFilter, pagination_marker: str, number: int, reverse: bool, context: grpc.ServicerContext) ‑> Iterator[DatumInfo]
def list_datum(
    self,
    job: "Job",
    input: "Input",
    filter: "ListDatumRequestFilter",
    pagination_marker: str,
    number: int,
    reverse: bool,
    context: "grpc.ServicerContext",
) -> Iterator["DatumInfo"]:
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def create_datum(self, request_iterator: Iterator[ForwardRef('CreateDatumRequest')], context: grpc.ServicerContext) ‑> Iterator[DatumInfo]
def create_datum(
    self,
    request_iterator: Iterator["CreateDatumRequest"],
    context: "grpc.ServicerContext",
) -> Iterator["DatumInfo"]:
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
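
create_datum is the only method documented here that takes a request iterator: the override receives an iterator of CreateDatumRequest messages and yields DatumInfo messages back, so both sides can stay lazy. A minimal sketch under the same ApiBase assumption:

class MyPpsApi(ApiBase):  # ApiBase: assumed base-class name
    def create_datum(
        self,
        request_iterator: Iterator["CreateDatumRequest"],
        context: "grpc.ServicerContext",
    ) -> Iterator["DatumInfo"]:
        # Consume requests lazily and emit one DatumInfo per incoming request.
        for _request in request_iterator:
            yield DatumInfo(state=DatumState.SUCCESS)
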
def restart_datum(self, job: Job, data_filters: Optional[List[str]], context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def restart_datum(
    self,
    job: "Job",
    data_filters: Optional[List[str]],
    context: "grpc.ServicerContext",
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def rerun_pipeline(self, pipeline: Pipeline, reprocess: bool, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def rerun_pipeline(
    self, pipeline: "Pipeline", reprocess: bool, context: "grpc.ServicerContext"
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def create_pipeline(self, pipeline: Pipeline, tf_job: TfJob, transform: Transform, parallelism_spec: ParallelismSpec, egress: Egress, update: bool, output_branch: str, s3_out: bool, resource_requests: ResourceSpec, resource_limits: ResourceSpec, sidecar_resource_limits: ResourceSpec, input: Input, description: str, reprocess: bool, service: Service, spout: Spout, datum_set_spec: DatumSetSpec, datum_timeout: datetime.timedelta, job_timeout: datetime.timedelta, salt: str, datum_tries: int, scheduling_spec: SchedulingSpec, pod_spec: str, pod_patch: str, spec_commit: _pfs__.Commit, metadata: Metadata, reprocess_spec: str, autoscaling: bool, tolerations: Optional[List[ForwardRef('Toleration')]], sidecar_resource_requests: ResourceSpec, dry_run: bool, determined: Determined, maximum_expected_uptime: datetime.timedelta, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def create_pipeline(
    self,
    pipeline: "Pipeline",
    tf_job: "TfJob",
    transform: "Transform",
    parallelism_spec: "ParallelismSpec",
    egress: "Egress",
    update: bool,
    output_branch: str,
    s3_out: bool,
    resource_requests: "ResourceSpec",
    resource_limits: "ResourceSpec",
    sidecar_resource_limits: "ResourceSpec",
    input: "Input",
    description: str,
    reprocess: bool,
    service: "Service",
    spout: "Spout",
    datum_set_spec: "DatumSetSpec",
    datum_timeout: timedelta,
    job_timeout: timedelta,
    salt: str,
    datum_tries: int,
    scheduling_spec: "SchedulingSpec",
    pod_spec: str,
    pod_patch: str,
    spec_commit: "_pfs__.Commit",
    metadata: "Metadata",
    reprocess_spec: str,
    autoscaling: bool,
    tolerations: Optional[List["Toleration"]],
    sidecar_resource_requests: "ResourceSpec",
    dry_run: bool,
    determined: "Determined",
    maximum_expected_uptime: timedelta,
    context: "grpc.ServicerContext",
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def create_pipeline_v2(self, create_pipeline_request_json: str, dry_run: bool, update: bool, reprocess: bool, context: grpc.ServicerContext) ‑> CreatePipelineV2Response
def create_pipeline_v2(
    self,
    create_pipeline_request_json: str,
    dry_run: bool,
    update: bool,
    reprocess: bool,
    context: "grpc.ServicerContext",
) -> "CreatePipelineV2Response":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
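
Unlike create_pipeline, create_pipeline_v2 takes the whole pipeline spec as a single JSON document. The sketch below shows an illustrative (not exhaustive) spec; the top-level keys mirror CreatePipelineRequest fields such as pipeline, transform, and input, and the image and command are hypothetical.

import json

# Illustrative pipeline spec; field names follow CreatePipelineRequest.
spec = {
    "pipeline": {"project": {"name": "default"}, "name": "edges"},
    "transform": {
        "image": "example/edges:1.0",  # hypothetical image
        "cmd": ["python3", "/edges.py"],
    },
    "input": {"pfs": {"repo": "images", "glob": "/*"}},
}
create_pipeline_request_json = json.dumps(spec)
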
def inspect_pipeline(self, pipeline: Pipeline, details: bool, context: grpc.ServicerContext) ‑> PipelineInfo
def inspect_pipeline(
    self, pipeline: "Pipeline", details: bool, context: "grpc.ServicerContext"
) -> "PipelineInfo":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def list_pipeline(self, pipeline: Pipeline, history: int, details: bool, jq_filter: str, commit_set: _pfs__.CommitSet, projects: Optional[List[ForwardRef('_pfs__.Project')]], context: grpc.ServicerContext) ‑> Iterator[PipelineInfo]
def list_pipeline(
    self,
    pipeline: "Pipeline",
    history: int,
    details: bool,
    jq_filter: str,
    commit_set: "_pfs__.CommitSet",
    projects: Optional[List["_pfs__.Project"]],
    context: "grpc.ServicerContext",
) -> Iterator["PipelineInfo"]:
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def delete_pipeline(self, pipeline: Pipeline, all: bool, force: bool, keep_repo: bool, must_exist: bool, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def delete_pipeline(
    self,
    pipeline: "Pipeline",
    all: bool,
    force: bool,
    keep_repo: bool,
    must_exist: bool,
    context: "grpc.ServicerContext",
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def delete_pipelines(self, projects: Optional[List[ForwardRef('_pfs__.Project')]], force: bool, keep_repo: bool, all: bool, context: grpc.ServicerContext) ‑> DeletePipelinesResponse
def delete_pipelines(
    self,
    projects: Optional[List["_pfs__.Project"]],
    force: bool,
    keep_repo: bool,
    all: bool,
    context: "grpc.ServicerContext",
) -> "DeletePipelinesResponse":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def start_pipeline(self, pipeline: Pipeline, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def start_pipeline(
    self, pipeline: "Pipeline", context: "grpc.ServicerContext"
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def stop_pipeline(self, pipeline: Pipeline, must_exist: bool, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def stop_pipeline(
    self, pipeline: "Pipeline", must_exist: bool, context: "grpc.ServicerContext"
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def run_pipeline(self, pipeline: Pipeline, provenance: Optional[List[ForwardRef('_pfs__.Commit')]], job_id: str, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def run_pipeline(
    self,
    pipeline: "Pipeline",
    provenance: Optional[List["_pfs__.Commit"]],
    job_id: str,
    context: "grpc.ServicerContext",
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def run_cron(self, pipeline: Pipeline, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def run_cron(
    self, pipeline: "Pipeline", context: "grpc.ServicerContext"
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def check_status(self, all: bool, project: _pfs__.Project, context: grpc.ServicerContext) ‑> Iterator[CheckStatusResponse]
def check_status(
    self, all: bool, project: "_pfs__.Project", context: "grpc.ServicerContext"
) -> Iterator["CheckStatusResponse"]:
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def create_secret(self, file: bytes, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def create_secret(
    self, file: bytes, context: "grpc.ServicerContext"
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def delete_secret(self, secret: Secret, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def delete_secret(
    self, secret: "Secret", context: "grpc.ServicerContext"
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def list_secret(self, context: grpc.ServicerContext) ‑> SecretInfos
def list_secret(self, context: "grpc.ServicerContext") -> "SecretInfos":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def inspect_secret(self, secret: Secret, context: grpc.ServicerContext) ‑> SecretInfo
def inspect_secret(
    self, secret: "Secret", context: "grpc.ServicerContext"
) -> "SecretInfo":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def delete_all(self, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def delete_all(
    self, context: "grpc.ServicerContext"
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def get_logs(self, pipeline: Pipeline, job: Job, data_filters: Optional[List[str]], datum: Datum, master: bool, follow: bool, tail: int, use_loki_backend: bool, since: datetime.timedelta, context: grpc.ServicerContext) ‑> Iterator[LogMessage]
def get_logs(
    self,
    pipeline: "Pipeline",
    job: "Job",
    data_filters: Optional[List[str]],
    datum: "Datum",
    master: bool,
    follow: bool,
    tail: int,
    use_loki_backend: bool,
    since: timedelta,
    context: "grpc.ServicerContext",
) -> Iterator["LogMessage"]:
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def activate_auth(self, context: grpc.ServicerContext) ‑> ActivateAuthResponse
def activate_auth(self, context: "grpc.ServicerContext") -> "ActivateAuthResponse":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def update_job_state(self, job: Job, state: JobState, reason: str, restart: int, data_processed: int, data_skipped: int, data_failed: int, data_recovered: int, data_total: int, stats: ProcessStats, context: grpc.ServicerContext) ‑> betterproto.lib.google.protobuf.Empty
def update_job_state(
    self,
    job: "Job",
    state: "JobState",
    reason: str,
    restart: int,
    data_processed: int,
    data_skipped: int,
    data_failed: int,
    data_recovered: int,
    data_total: int,
    stats: "ProcessStats",
    context: "grpc.ServicerContext",
) -> "betterproto_lib_google_protobuf.Empty":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def run_load_test(self, dag_spec: str, load_spec: str, seed: int, parallelism: int, pod_patch: str, state_id: str, context: grpc.ServicerContext) ‑> RunLoadTestResponse
def run_load_test(
    self,
    dag_spec: str,
    load_spec: str,
    seed: int,
    parallelism: int,
    pod_patch: str,
    state_id: str,
    context: "grpc.ServicerContext",
) -> "RunLoadTestResponse":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def run_load_test_default(self, context: grpc.ServicerContext) ‑> RunLoadTestResponse
def run_load_test_default(
    self, context: "grpc.ServicerContext"
) -> "RunLoadTestResponse":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def render_template(self, template: str, args: Dict[str, str], context: grpc.ServicerContext) ‑> RenderTemplateResponse
def render_template(
    self, template: str, args: Dict[str, str], context: "grpc.ServicerContext"
) -> "RenderTemplateResponse":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def list_task(self, group: Group, context: grpc.ServicerContext) ‑> Iterator[_taskapi__.TaskInfo]
def list_task(
    self, group: "Group", context: "grpc.ServicerContext"
) -> Iterator["_taskapi__.TaskInfo"]:
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def get_kube_events(self, since: datetime.timedelta, query: str, context: grpc.ServicerContext) ‑> Iterator[LokiLogMessage]
def get_kube_events(
    self, since: timedelta, query: str, context: "grpc.ServicerContext"
) -> Iterator["LokiLogMessage"]:
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def query_loki(self, since: datetime.timedelta, query: str, context: grpc.ServicerContext) ‑> Iterator[LokiLogMessage]
def query_loki(
    self, since: timedelta, query: str, context: "grpc.ServicerContext"
) -> Iterator["LokiLogMessage"]:
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def get_cluster_defaults(self, context: grpc.ServicerContext) ‑> GetClusterDefaultsResponse
def get_cluster_defaults(
    self, context: "grpc.ServicerContext"
) -> "GetClusterDefaultsResponse":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def set_cluster_defaults(self, regenerate: bool, reprocess: bool, dry_run: bool, cluster_defaults_json: str, context: grpc.ServicerContext) ‑> SetClusterDefaultsResponse
def set_cluster_defaults(
    self,
    regenerate: bool,
    reprocess: bool,
    dry_run: bool,
    cluster_defaults_json: str,
    context: "grpc.ServicerContext",
) -> "SetClusterDefaultsResponse":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def get_project_defaults(self, project: _pfs__.Project, context: grpc.ServicerContext) ‑> GetProjectDefaultsResponse
def get_project_defaults(
    self, project: "_pfs__.Project", context: "grpc.ServicerContext"
) -> "GetProjectDefaultsResponse":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def set_project_defaults(self, project: _pfs__.Project, regenerate: bool, reprocess: bool, dry_run: bool, project_defaults_json: str, context: grpc.ServicerContext) ‑> SetProjectDefaultsResponse
def set_project_defaults(
    self,
    project: "_pfs__.Project",
    regenerate: bool,
    reprocess: bool,
    dry_run: bool,
    project_defaults_json: str,
    context: "grpc.ServicerContext",
) -> "SetProjectDefaultsResponse":
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
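
Since every unimplemented method sets grpc.StatusCode.UNIMPLEMENTED on the context before raising, a client that calls an un-overridden method observes a grpc.RpcError rather than the server-side NotImplementedError. A hedged client-side sketch (ApiStub and its keyword-argument call style are assumptions based on this module's conventions):

import grpc

# Assumed imports; ApiStub is the presumed name of the generated client stub.
from pachyderm_sdk.api.pps import ApiStub, Job

channel = grpc.insecure_channel("localhost:50051")
stub = ApiStub(channel)

try:
    stub.inspect_job(job=Job(), wait=False, details=False)
except grpc.RpcError as err:
    # Un-overridden servicer methods surface as UNIMPLEMENTED to callers.
    assert err.code() == grpc.StatusCode.UNIMPLEMENTED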