From 2ba1651b2ff26ff55f79cbc10f46cbf6e8d511eb Mon Sep 17 00:00:00 2001 From: Mehdi Drissi Date: Wed, 10 Aug 2022 17:17:15 -0700 Subject: [PATCH] Add tensorflow type stubs --- tensorflow/__init__.pyi | 479 ++++++++++++++++++ tensorflow/autograph/__init__.pyi | 1 + tensorflow/autograph/experimental.pyi | 10 + tensorflow/compat/__init__.pyi | 1 + tensorflow/compat/v1/__init__.pyi | 80 +++ tensorflow/compat/v1/graph_util.pyi | 12 + tensorflow/compat/v1/saved_model/__init__.pyi | 17 + tensorflow/compat/v1/saved_model/builder.pyi | 3 + tensorflow/compat/v1/saved_model/loader.pyi | 7 + .../compat/v1/saved_model/tag_constants.pyi | 4 + tensorflow/config/__init__.pyi | 0 tensorflow/config/experimental.pyi | 1 + tensorflow/data/__init__.pyi | 63 +++ tensorflow/data/experimental.pyi | 20 + tensorflow/distribute.pyi | 39 ++ tensorflow/dtypes.pyi | 54 ++ tensorflow/errors.pyi | 3 + tensorflow/estimator/__init__.pyi | 150 ++++++ tensorflow/estimator/export.pyi | 14 + tensorflow/graph_util.pyi | 26 + tensorflow/initializers.pyi | 1 + tensorflow/io.pyi | 3 + tensorflow/keras/__init__.pyi | 217 ++++++++ tensorflow/keras/activations.pyi | 7 + tensorflow/keras/applications.pyi | 0 tensorflow/keras/callbacks.pyi | 100 ++++ tensorflow/keras/constraints.pyi | 16 + tensorflow/keras/initializers.pyi | 44 ++ tensorflow/keras/layers.pyi | 131 +++++ tensorflow/keras/losses.pyi | 42 ++ tensorflow/keras/metrics.pyi | 25 + tensorflow/keras/models.pyi | 8 + tensorflow/keras/optimizers/__init__.pyi | 71 +++ tensorflow/keras/optimizers/schedules.pyi | 97 ++++ tensorflow/keras/regularizers.pyi | 20 + tensorflow/keras/utils/__init__.pyi | 15 + tensorflow/keras/utils/experimental.pyi | 12 + tensorflow/linalg.pyi | 34 ++ tensorflow/lookup.pyi | 68 +++ tensorflow/losses.pyi | 1 + tensorflow/math.pyi | 230 +++++++++ tensorflow/metrics.pyi | 1 + tensorflow/nn.pyi | 31 ++ tensorflow/optimizers/__init__.pyi | 5 + tensorflow/python/__init__.pyi | 3 + tensorflow/python/framework/__init__.pyi | 0 tensorflow/python/framework/test_util.pyi | 9 + tensorflow/python/keras/__init__.pyi | 0 tensorflow/python/keras/utils/__init__.pyi | 0 .../python/keras/utils/generic_utils.pyi | 1 + tensorflow/python/layers/__init__.pyi | 3 + tensorflow/python/layers/base.pyi | 3 + tensorflow/python/training/__init__.pyi | 0 .../python/training/tracking/__init__.pyi | 0 .../training/tracking/autotrackable.pyi | 1 + tensorflow/ragged.pyi | 8 + tensorflow/random.pyi | 40 ++ tensorflow/saved_model/__init__.pyi | 65 +++ tensorflow/saved_model/experimental.pyi | 3 + tensorflow/sparse.pyi | 31 ++ tensorflow/strings.pyi | 49 ++ tensorflow/summary.pyi | 9 + tensorflow/test.pyi | 21 + tensorflow/types/__init__.pyi | 1 + tensorflow/types/experimental.pyi | 33 ++ 65 files changed, 2443 insertions(+) create mode 100644 tensorflow/__init__.pyi create mode 100644 tensorflow/autograph/__init__.pyi create mode 100644 tensorflow/autograph/experimental.pyi create mode 100644 tensorflow/compat/__init__.pyi create mode 100644 tensorflow/compat/v1/__init__.pyi create mode 100644 tensorflow/compat/v1/graph_util.pyi create mode 100644 tensorflow/compat/v1/saved_model/__init__.pyi create mode 100644 tensorflow/compat/v1/saved_model/builder.pyi create mode 100644 tensorflow/compat/v1/saved_model/loader.pyi create mode 100644 tensorflow/compat/v1/saved_model/tag_constants.pyi create mode 100644 tensorflow/config/__init__.pyi create mode 100644 tensorflow/config/experimental.pyi create mode 100644 tensorflow/data/__init__.pyi create mode 100644 tensorflow/data/experimental.pyi create 
mode 100644 tensorflow/distribute.pyi create mode 100644 tensorflow/dtypes.pyi create mode 100644 tensorflow/errors.pyi create mode 100644 tensorflow/estimator/__init__.pyi create mode 100644 tensorflow/estimator/export.pyi create mode 100644 tensorflow/graph_util.pyi create mode 100644 tensorflow/initializers.pyi create mode 100644 tensorflow/io.pyi create mode 100644 tensorflow/keras/__init__.pyi create mode 100644 tensorflow/keras/activations.pyi create mode 100644 tensorflow/keras/applications.pyi create mode 100644 tensorflow/keras/callbacks.pyi create mode 100644 tensorflow/keras/constraints.pyi create mode 100644 tensorflow/keras/initializers.pyi create mode 100644 tensorflow/keras/layers.pyi create mode 100644 tensorflow/keras/losses.pyi create mode 100644 tensorflow/keras/metrics.pyi create mode 100644 tensorflow/keras/models.pyi create mode 100644 tensorflow/keras/optimizers/__init__.pyi create mode 100644 tensorflow/keras/optimizers/schedules.pyi create mode 100644 tensorflow/keras/regularizers.pyi create mode 100644 tensorflow/keras/utils/__init__.pyi create mode 100644 tensorflow/keras/utils/experimental.pyi create mode 100644 tensorflow/linalg.pyi create mode 100644 tensorflow/lookup.pyi create mode 100644 tensorflow/losses.pyi create mode 100644 tensorflow/math.pyi create mode 100644 tensorflow/metrics.pyi create mode 100644 tensorflow/nn.pyi create mode 100644 tensorflow/optimizers/__init__.pyi create mode 100644 tensorflow/python/__init__.pyi create mode 100644 tensorflow/python/framework/__init__.pyi create mode 100644 tensorflow/python/framework/test_util.pyi create mode 100644 tensorflow/python/keras/__init__.pyi create mode 100644 tensorflow/python/keras/utils/__init__.pyi create mode 100644 tensorflow/python/keras/utils/generic_utils.pyi create mode 100644 tensorflow/python/layers/__init__.pyi create mode 100644 tensorflow/python/layers/base.pyi create mode 100644 tensorflow/python/training/__init__.pyi create mode 100644 tensorflow/python/training/tracking/__init__.pyi create mode 100644 tensorflow/python/training/tracking/autotrackable.pyi create mode 100644 tensorflow/ragged.pyi create mode 100644 tensorflow/random.pyi create mode 100644 tensorflow/saved_model/__init__.pyi create mode 100644 tensorflow/saved_model/experimental.pyi create mode 100644 tensorflow/sparse.pyi create mode 100644 tensorflow/strings.pyi create mode 100644 tensorflow/summary.pyi create mode 100644 tensorflow/test.pyi create mode 100644 tensorflow/types/__init__.pyi create mode 100644 tensorflow/types/experimental.pyi diff --git a/tensorflow/__init__.pyi b/tensorflow/__init__.pyi new file mode 100644 index 00000000..ff3c686a --- /dev/null +++ b/tensorflow/__init__.pyi @@ -0,0 +1,479 @@ +from typing import Any, Callable, Iterable, Iterator, Mapping, NoReturn, Sequence, TypeVar, overload +from typing_extensions import Literal, ParamSpec + +from builtins import bool as _bool +from contextlib import contextmanager +from enum import Enum +from logging import Logger +from types import TracebackType + +import numpy as np + +from tensorflow import autograph as autograph +from tensorflow import compat as compat +from tensorflow import data as data +from tensorflow import dtypes as dtypes +from tensorflow import errors as errors +from tensorflow import estimator as estimator +from tensorflow import initializers as initializers +from tensorflow import io as io +from tensorflow import keras as keras +from tensorflow import linalg as linalg +from tensorflow import lookup as lookup +from tensorflow import 
math as math
+from tensorflow import metrics as metrics
+from tensorflow import nn as nn
+from tensorflow import optimizers as optimizers
+from tensorflow import ragged as ragged
+from tensorflow import random as random
+from tensorflow import saved_model as saved_model
+from tensorflow import sparse as sparse
+from tensorflow import strings as strings
+from tensorflow import summary as summary
+from tensorflow import test as test
+from tensorflow import types as types
+from tensorflow.compat.v1 import _FeedDict  # type: ignore
+from tensorflow.dtypes import *
+from tensorflow.graph_util import import_graph_def as import_graph_def
+from tensorflow.keras.initializers import Zeros
+from tensorflow.linalg import matmul as matmul
+
+# Most tf.math functions are re-exported at the top level of tf, but sadly not all of them are.
+from tensorflow.math import abs as abs
+from tensorflow.math import add as add
+from tensorflow.math import add_n as add_n
+from tensorflow.math import argmax as argmax
+from tensorflow.math import argmin as argmin
+from tensorflow.math import cos as cos
+from tensorflow.math import cosh as cosh
+from tensorflow.math import divide as divide
+from tensorflow.math import equal as equal
+from tensorflow.math import greater as greater
+from tensorflow.math import greater_equal as greater_equal
+from tensorflow.math import less as less
+from tensorflow.math import less_equal as less_equal
+from tensorflow.math import logical_and as logical_and
+from tensorflow.math import logical_not as logical_not
+from tensorflow.math import logical_or as logical_or
+from tensorflow.math import maximum as maximum
+from tensorflow.math import minimum as minimum
+from tensorflow.math import multiply as multiply
+from tensorflow.math import not_equal as not_equal
+from tensorflow.math import pow as pow
+from tensorflow.math import reduce_max as reduce_max
+from tensorflow.math import reduce_mean as reduce_mean
+from tensorflow.math import reduce_min as reduce_min
+from tensorflow.math import reduce_prod as reduce_prod
+from tensorflow.math import reduce_sum as reduce_sum
+from tensorflow.math import sigmoid as sigmoid
+from tensorflow.math import sign as sign
+from tensorflow.math import sin as sin
+from tensorflow.math import sinh as sinh
+from tensorflow.math import sqrt as sqrt
+from tensorflow.math import square as square
+from tensorflow.math import subtract as subtract
+from tensorflow.math import tanh as tanh
+from tensorflow.python.training.tracking.autotrackable import AutoTrackable
+from tensorflow.sparse import SparseTensor as SparseTensor
+from tensorflow.strings import as_string as as_string
+
+from bento.utils.tensor_types import *
+
+# These types are written based on usage. If a type annotation is inconsistent with runtime behavior, feel free to
+# improve it. Some of these classes may be missing methods. You can add def __getattr__(self, name: str) -> Any: ...
+# when you know a class is incomplete and is missing major methods. If the most common methods are present, it is
+# fine to leave __getattr__ out and fill in more methods as errors are encountered.
+
+# The tf.compat.v1 API will mostly not be stubbed, because the goal is to remove it from the codebase entirely. If
+# there is functionality with no tf2 equivalent, we may include it in the stubs, but things like
+# tf.Session/placeholder should be strongly avoided.
+
+# Tensor should ideally be a generic type, but properly typing the data type/shape would be a lot of work. Until we
+# have good non-generic tensorflow stubs, we will skip making Tensor generic. Also, good type hints for shapes would
+# quickly run into many places where the type system is not strong enough today, so shape typing is probably not
+# worth doing anytime soon.
+_SliceT = int | slice | None
+
+_R = TypeVar("_R")
+_P = ParamSpec("_P")
+
+class Tensor:
+    def consumers(self) -> list[Operation]: ...
+    @property
+    def shape(self) -> TensorShape: ...
+    def get_shape(self) -> TensorShape: ...
+    @property
+    def dtype(self) -> DType: ...
+    @property
+    def graph(self) -> Graph: ...
+    @property
+    def name(self) -> str: ...
+    @property
+    def op(self) -> Operation: ...
+    def numpy(self) -> np.ndarray[Any, Any]: ...
+    def eval(
+        self, feed_dict: _FeedDict | None = None, session: tf.compat.v1.Session | None = None
+    ) -> np.ndarray[Any, Any]: ...
+    def __int__(self) -> int: ...
+    def __abs__(self) -> Tensor: ...
+    def __add__(self, other: TensorCompatible) -> Tensor: ...
+    def __radd__(self, other: TensorCompatible) -> Tensor: ...
+    def __sub__(self, other: TensorCompatible) -> Tensor: ...
+    def __rsub__(self, other: TensorCompatible) -> Tensor: ...
+    def __mul__(self, other: TensorCompatible) -> Tensor: ...
+    def __rmul__(self, other: TensorCompatible) -> Tensor: ...
+    def __pow__(self, other: TensorCompatible) -> Tensor: ...
+    def __matmul__(self, other: TensorCompatible) -> Tensor: ...
+    def __rmatmul__(self, other: TensorCompatible) -> Tensor: ...
+    def __floordiv__(self, other: TensorCompatible) -> Tensor: ...
+    def __rfloordiv__(self, other: TensorCompatible) -> Tensor: ...
+    def __truediv__(self, other: TensorCompatible) -> Tensor: ...
+    def __rtruediv__(self, other: TensorCompatible) -> Tensor: ...
+    def __neg__(self) -> Tensor: ...
+    def __and__(self, other: TensorCompatible) -> Tensor: ...
+    def __rand__(self, other: TensorCompatible) -> Tensor: ...
+    def __or__(self, other: TensorCompatible) -> Tensor: ...
+    def __ror__(self, other: TensorCompatible) -> Tensor: ...
+    def __eq__(self, other: TensorCompatible) -> Tensor: ...
+    def __ne__(self, other: TensorCompatible) -> Tensor: ...
+    def __ge__(self, other: TensorCompatible) -> Tensor: ...
+    def __gt__(self, other: TensorCompatible) -> Tensor: ...
+    def __le__(self, other: TensorCompatible) -> Tensor: ...
+    def __lt__(self, other: TensorCompatible) -> Tensor: ...
+    def __bool__(self) -> NoReturn: ...
+    def __getitem__(self, slice_spec: _SliceT | tuple[_SliceT, ...]) -> Tensor: ...
+    def __len__(self) -> int: ...
+    # This only works for rank 0 tensors.
+    def __index__(self) -> int: ...
+
+# This is mostly a white lie. Variable is not a real subclass of Tensor, but it behaves very similarly to one.
+# Most functions/operations on tensors also work on variables; the main difference is isinstance checks.
+class Variable(Tensor):
+    def __init__(
+        self,
+        initial_value: TensorCompatible | Callable[[], TensorCompatible],
+        trainable: None | _bool = None,
+        validate_shape: _bool = True,
+        name: str | None = None,
+        dtype: DTypeLike | None = None,
+        # constraint should be used rarely. It's incompatible with asynchronous training.
+        constraint: Callable[[Tensor], Tensor] | None = None,
+        synchronization: VariableSynchronization = VariableSynchronization.AUTO,
+        aggregation: VariableAggregation = VariableAggregation.NONE,
+    ): ...
+
+# Most type annotations currently ignore ragged tensors due to rarity.
+class RaggedTensor:
+    def bounding_shape(
+        self, axis: TensorCompatible | None = None, name: str | None = None, out_type: DTypeLike | None = None
+    ) -> Tensor: ...
+ @classmethod + def from_sparse( + cls, st_input: SparseTensor, name: str | None = None, row_splits_dtype: DTypeLike = int64 + ) -> RaggedTensor: ... + def to_sparse(self, name: str | None = None) -> SparseTensor: ... + def to_tensor( + self, default_value: float | str | None = None, name: str | None = None, shape: ShapeLike | None = None + ) -> Tensor: ... + def __add__(self, other: RaggedTensor | float) -> RaggedTensor: ... + def __radd__(self, other: RaggedTensor | float) -> RaggedTensor: ... + def __sub__(self, other: RaggedTensor | float) -> RaggedTensor: ... + def __mul__(self, other: RaggedTensor | float) -> RaggedTensor: ... + def __rmul__(self, other: RaggedTensor | float) -> RaggedTensor: ... + def __floordiv__(self, other: RaggedTensor | float) -> RaggedTensor: ... + def __truediv__(self, other: RaggedTensor | float) -> RaggedTensor: ... + def __getitem__(self, slice_spec: _SliceT | tuple[_SliceT, ...]) -> RaggedTensor: ... + def __getattr__(self, name: str) -> Any: ... + +class VariableSynchronization(Enum): + AUTO = 0 + NONE = 1 + ON_WRITE = 2 + ON_READ = 3 + +class VariableAggregation(Enum): + NONE = 0 + SUM = 1 + MEAN = 2 + ONLY_FIRST_REPLICA = 3 + +class TensorShape: + def __init__(self, dims: Iterable[int | None]): ... + @property + def rank(self) -> int: ... + def as_list(self) -> list[int | None]: ... + def assert_has_rank(self, rank: int) -> None: ... + def assert_is_compatible_with(self, other: Iterable[int | None]) -> None: ... + def __bool__(self) -> _bool: ... + @overload + def __getitem__(self, key: int) -> int | None: ... + @overload + def __getitem__(self, key: slice) -> TensorShape: ... + def __iter__(self) -> Iterator[int | None]: ... + def __len__(self) -> int: ... + def __add__(self, other: Iterable[int | None]) -> TensorShape: ... + def __radd__(self, other: Iterable[int | None]) -> TensorShape: ... + def __eq__(self, other: Iterable[int | None]) -> _bool: ... + +class TypeSpec: + def is_compatible_with(self, spec_or_value: TypeSpec | TensorCompatible | SparseTensor | RaggedTensor) -> _bool: ... + def most_specific_compatible_type(self, other: TypeSpec) -> TypeSpec: ... + +class TensorSpec(TypeSpec): + def __init__(self, shape: ShapeLike, dtype: DTypeLike = float32, name: str | None = None) -> None: ... + @property + def shape(self) -> TensorShape: ... + @property + def dtype(self) -> DType: ... + +class IndexedSlices: + def __init__(self, values: Tensor, indices: Tensor, dense_shape: None | Tensor = None): ... + @property + def values(self) -> Tensor: ... + @property + def indices(self) -> Tensor: ... + @property + def dense_shape(self) -> None | Tensor: ... + @property + def shape(self) -> TensorShape: ... + @property + def dtype(self) -> DType: ... + def __getattr__(self, name: str) -> Any: ... + +class GradientTape: + def __init__(self, persistent: _bool = False, watch_accessed_variables: _bool = True): ... + def __enter__(self) -> GradientTape: ... + def __exit__( + self, typ: type[BaseException] | None, value: BaseException | None, traceback: TracebackType | None + ) -> None: ... + def watch(self, tensor: ContainerTensorsLike) -> None: ... + def watched_variables(self) -> tuple[Variable, ...]: ... + # Higher kinded types would be nice here and these overloads are a way to simulate some of them. + @overload + def gradient( + self, + target: ContainerTensors, + sources: TensorLike, + output_gradients: list[Tensor] | None = None, + ) -> GradientsT: ... 
+ @overload + def gradient( + self, + target: ContainerTensors, + sources: Sequence[Tensor], + output_gradients: list[Tensor] | None = None, + ) -> list[GradientsT]: ... + @overload + def gradient( + self, + target: ContainerTensors, + sources: Mapping[str, Tensor], + output_gradients: list[Tensor] | None = None, + ) -> dict[str, GradientsT]: ... + @overload + def gradient( + self, + target: ContainerTensors, + sources: ContainerTensors, + output_gradients: list[Tensor] | None = None, + ) -> ContainerGradients: ... + +def executing_eagerly() -> _bool: ... +@overload +def identity(input: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def identity(input: SparseTensor, name: str | None = None) -> SparseTensor: ... +@overload +def identity(input: SparseTensor | TensorCompatible, name: str | None = None) -> SparseTensor | Tensor: ... +def clip_by_value( + x: RaggedTensorLikeT, clip_value_min: float | Tensor, clip_value_max: float | Tensor, name: str | None = None +) -> RaggedTensorLikeT: ... +def searchsorted( + sorted_sequence: TensorCompatible, values: Tensor, side: Literal["left", "right"] = "left", name: str | None = None +) -> Tensor: ... +def constant( + value: TensorCompatible, dtype: DTypeLike | None = None, shape: ShapeLike | None = None, name: str | None = None +) -> Tensor: ... +def zeros(shape: ShapeLike, dtype: DTypeLike = dtypes.float32, name: str | None = None) -> Tensor: ... +def ones(shape: ShapeLike, dtype: DTypeLike = dtypes.float32, name: str | None = None) -> Tensor: ... +@overload +def zeros_like(input: TensorCompatible, dtype: DTypeLike | None = None, name: str | None = None) -> Tensor: ... +@overload +def zeros_like(input: RaggedTensor, dtype: DTypeLike | None = None, name: str | None = None) -> RaggedTensor: ... +@overload +def ones_like(input: TensorCompatible, dtype: DTypeLike | None = None, name: str | None = None) -> Tensor: ... +@overload +def ones_like(input: RaggedTensor, dtype: DTypeLike | None = None, name: str | None = None) -> RaggedTensor: ... +def range( + start: TensorCompatible, + limit: TensorCompatible | None = None, + delta: TensorCompatible = 1, + dtype: DTypeLike | None = None, + name: str | None = None, +) -> Tensor: ... +@overload +def concat( + values: Sequence[TensorCompatible] | TensorCompatible, + axis: int, + name: str | None = None, +) -> Tensor: ... +@overload +def concat( + values: Sequence[RaggedTensor | TensorCompatible] | RaggedTensor, + axis: int, + name: str | None = None, +) -> RaggedTensor: ... +def reshape(tensor: TensorCompatible, shape: ShapeLike | Tensor, name: str | None = None) -> Tensor: ... +@overload +def cast(x: TensorCompatible, dtype: DTypeLike, name: str | None = None) -> Tensor: ... +@overload +def cast(x: SparseTensor, dtype: DTypeLike, name: str | None = None) -> SparseTensor: ... +@overload +def cast(x: RaggedTensor, dtype: DTypeLike, name: str | None = None) -> RaggedTensor: ... +@overload +def where( + condition: TensorCompatible | RaggedTensor, x: None = None, y: None = None, name: str | None = None +) -> Tensor: ... +@overload +def where(condition: TensorCompatible, x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def where( + condition: RaggedTensor, x: RaggedTensor | float, y: RaggedTensor, name: str | None = None +) -> RaggedTensor: ... +def meshgrid( + *tensors: TensorCompatible, indexing: Literal["xy", "ij"] = "xy", name: str | None = None +) -> tuple[Tensor, ...]: ... 
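+# Illustrative, not part of the stubs: the paired overloads above (identity, cast,
+# zeros_like, where, ...) exist so that the kind of tensor is preserved through an
+# op. Hypothetical usage that should type check accordingly (tf.ragged.constant is
+# the runtime API and is assumed here, not declared in this file):
+#
+#     dense = tf.cast(tf.constant([1, 2]), tf.float32)                 # Tensor
+#     ragged = tf.cast(tf.ragged.constant([[1], [2, 3]]), tf.float32)  # RaggedTensor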
+def shape(input: SparseTensorCompatible, out_type: DTypeLike = int32, name: str | None = None) -> Tensor: ... +def broadcast_to( + tensor: TensorCompatible, shape: TensorCompatible | TensorShape, name: str | None = None +) -> Tensor: ... +def split( + value: TensorCompatible, + num_or_size_splits: TensorCompatible, + axis: int = 0, + num: int | None = None, + name: str | None = None, +) -> list[Tensor]: ... +def stack(values: Sequence[TensorCompatible], axis: int = 0, name: str | None = "stack") -> Tensor: ... +@overload +def gather( + params: TensorCompatible, + indices: TensorCompatible, + axis: ScalarTensorCompatible | None = None, + batch_dims: int = 0, + name: str | None = None, +) -> Tensor: ... +@overload +def gather( + params: RaggedTensor, + indices: TensorCompatible, + axis: ScalarTensorCompatible | None = None, + batch_dims: int = 0, + name: str | None = None, +) -> RaggedTensor: ... +@overload +def expand_dims(input: TensorCompatible, axis: int, name: str | None = None) -> Tensor: ... +@overload +def expand_dims(input: RaggedTensor, axis: int, name: str | None = None) -> RaggedTensor: ... +@overload +def squeeze( + input: TensorCompatible, axis: int | tuple[int, ...] | list[int] | None = None, name: str | None = None +) -> Tensor: ... +@overload +def squeeze(input: RaggedTensor, axis: int | tuple[int, ...] | list[int], name: str | None = None) -> RaggedTensor: ... +def zeros_initializer() -> Zeros: ... +def pad( + tensor: TensorCompatible, + paddings: TensorCompatible, + mode: Literal["constant", "CONSTANT", "reflect", "REFLECT", "symmetric", "SYMMETRIC"] = "CONSTANT", + constant_values: float | str = 0, + name: str | None = None, +) -> Tensor: ... +def transpose( + a: TensorCompatible, perm: TensorCompatible | None = None, conjugate: _bool = False, name: str | None = None +) -> Tensor: ... +def tensordot(a: TensorCompatible, b: TensorCompatible, axes: TensorCompatible, name: str | None = None) -> Tensor: ... +def bitcast(input: TensorCompatible, dtype: DTypeLike, name: str | None = None) -> Tensor: ... +def repeat( + input: TensorCompatible, repeats: TensorCompatible, axis: int | None = None, name: str | None = None +) -> Tensor: ... +def broadcast_static_shape(shape_x: TensorShape, shape_y: TensorShape) -> TensorShape: ... +def print(*args: object, **kwargs: object) -> None: ... + +class name_scope: + def __init__(self, name: str): ... + def __enter__(self) -> str: ... + def __exit__( + self, typ: type[BaseException] | None, value: BaseException | None, traceback: TracebackType | None + ) -> None: ... + +@contextmanager +def init_scope() -> Iterator[None]: ... + +class Graph: + def add_to_collection(self, name: str, value: object): ... + def add_to_collections(self, names: Iterable[str] | str, value: object): ... + @contextmanager + def as_default(self) -> Iterator[None]: ... + def finalize(self) -> None: ... + def get_tensor_by_name(self, name: str) -> Tensor: ... + def get_operation_by_name(self, name: str) -> Operation: ... + def get_operations(self) -> list[Operation]: ... + def as_graph_def(self, from_version: int | None = None, add_shapes: _bool = False) -> compat.v1.GraphDef: ... + def get_name_scope(self) -> str: ... + +class Operation: + @property + def inputs(self) -> list[Tensor]: ... + @property + def input_types(self) -> list[DType]: ... + @property + def outputs(self) -> list[Tensor]: ... + @property + def output_types(self) -> list[DType]: ... + @property + def device(self) -> str: ... + @property + def name(self) -> str: ... 
+ @property + def type(self) -> str: ... + def __getitem__(self, slice_spec: int | slice | tuple[int | slice, ...]) -> Operation: ... + +class Module(AutoTrackable): + def __init__(self, name: str | None) -> None: ... + @property + def name(self) -> str: ... + @property + def name_scope(self) -> tf.name_scope: ... + @property + def variables(self) -> Sequence[Variable]: ... + @property + def trainable_variables(self) -> Sequence[Variable]: ... + @property + def non_trainable_variables(self) -> Sequence[Variable]: ... + +@overload +def function( + func: None = None, + input_signature: ContainerGeneric[TensorSpec] | None = None, + autograph: _bool = True, + jit_compile: _bool | None = None, + reduce_retracing: _bool = False, + experimental_implements: str | None = None, + experimental_autograph_options: tuple[tf.autograph.experimental.Feature, ...] | None = None, + experimental_follow_type_hints: _bool | None = None, +) -> Callable[[Callable[_P, _R]], tf.types.experimental.GenericFunction[_P, _R]]: ... +@overload +def function( + func: Callable[_P, _R], + input_signature: ContainerGeneric[TensorSpec] | None = None, + autograph: _bool = True, + jit_compile: _bool | None = None, + reduce_retracing: _bool = False, + experimental_implements: str | None = None, + experimental_autograph_options: tuple[tf.autograph.experimental.Feature, ...] | None = None, + experimental_follow_type_hints: _bool | None = None, +) -> tf.types.experimental.GenericFunction[_P, _R]: ... +def get_logger() -> Logger: ... +def __getattr__(name: str) -> Any: ... diff --git a/tensorflow/autograph/__init__.pyi b/tensorflow/autograph/__init__.pyi new file mode 100644 index 00000000..c03fbc05 --- /dev/null +++ b/tensorflow/autograph/__init__.pyi @@ -0,0 +1 @@ +from tensorflow.autograph import experimental as experimental diff --git a/tensorflow/autograph/experimental.pyi b/tensorflow/autograph/experimental.pyi new file mode 100644 index 00000000..8e6243c9 --- /dev/null +++ b/tensorflow/autograph/experimental.pyi @@ -0,0 +1,10 @@ +from enum import Enum + +class Feature(Enum): + ALL = "ALL" + AUTO_CONTROL_DEPS = "AUTO_CONTROL_DEPS" + ASSERT_STATEMENTS = "ASSERT_STATEMENTS" + BUILTIN_FUNCTIONS = "BUILTIN_FUNCTIONS" + EQUALITY_OPERATORS = "EQUALITY_OPERATORS" + LISTS = "LISTS" + NAME_SCOPES = "NAME_SCOPES" diff --git a/tensorflow/compat/__init__.pyi b/tensorflow/compat/__init__.pyi new file mode 100644 index 00000000..5112aa40 --- /dev/null +++ b/tensorflow/compat/__init__.pyi @@ -0,0 +1 @@ +from tensorflow.compat import v1 as v1 diff --git a/tensorflow/compat/v1/__init__.pyi b/tensorflow/compat/v1/__init__.pyi new file mode 100644 index 00000000..33c4c6df --- /dev/null +++ b/tensorflow/compat/v1/__init__.pyi @@ -0,0 +1,80 @@ +from typing import Any, Mapping, MutableSequence, Sequence, overload +from typing_extensions import Self + +from types import TracebackType + +from google.protobuf.message import Message + +import numpy as np + +import tensorflow as tf +from tensorflow.compat.v1 import graph_util as graph_util +from tensorflow.compat.v1 import saved_model as saved_model + +from bento.utils.tensor_types import FloatDataSequence + +# Would be better to use mypy-protobuf to make this. +class GraphDef(Message): + node: MutableSequence[NodeDef] + def __getattr__(self, name: str) -> Any: ... + +class MetaGraphDef(Message): ... + +class NodeDef(Message): + name: str + op: str + input: MutableSequence[str] + def __getattr__(self, name: str) -> Any: ... + +class RunOptions(Message): ... +class RunMetadata(Message): ... 
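+# Illustrative, not part of the stubs: these hand-written Message stubs only aim to
+# support common access patterns (gd here is a hypothetical GraphDef value):
+#
+#     for node in gd.node:  # MutableSequence[NodeDef]
+#         print(node.name, node.op, list(node.input))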
+ +_GraphElement = tf.Tensor | tf.SparseTensor | tf.Operation | str +_FeedElement = float | str | np.ndarray[Any, Any] | FloatDataSequence +# This is a simplification. Key being invariant in a Mapping makes the real type difficult to write. This +# is enough to cover vast majority of use cases. +_FeedDict = Mapping[str, _FeedElement] | Mapping[tf.Tensor, _FeedElement] | Mapping[tf.SparseTensor, _FeedElement] + +class Session: + graph: tf.Graph + graph_def: GraphDef + def __init__( + self, + *, + graph: tf.Graph | None = None, + ) -> None: ... + @overload + def run( + self, + fetches: _GraphElement, + feed_dict: _FeedDict | None = None, + options: RunOptions | None = None, + run_metadata: RunMetadata | None = None, + ) -> np.ndarray[Any, Any]: ... + @overload + def run( + self, + fetches: Sequence[_GraphElement], + feed_dict: _FeedDict | None = None, + options: RunOptions | None = None, + run_metadata: RunMetadata | None = None, + ) -> list[np.ndarray[Any, Any]]: ... + @overload + def run( + self, + fetches: Mapping[str, _GraphElement], + feed_dict: _FeedDict | None = None, + options: RunOptions | None = None, + run_metadata: RunMetadata | None = None, + ) -> dict[str, np.ndarray[Any, Any]]: ... + def __enter__(self) -> Self: ... + def __exit__( + self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None + ) -> None: ... + +def disable_eager_execution() -> None: ... +def disable_v2_behavior() -> None: ... +def global_variables_initializer() -> tf.Operation: ... +def tables_initializer() -> tf.Operation: ... +def get_default_graph() -> tf.Graph: ... +def __getattr__(name: str) -> Any: ... diff --git a/tensorflow/compat/v1/graph_util.pyi b/tensorflow/compat/v1/graph_util.pyi new file mode 100644 index 00000000..1be40f31 --- /dev/null +++ b/tensorflow/compat/v1/graph_util.pyi @@ -0,0 +1,12 @@ +from typing import Iterable + +from tensorflow.compat.v1 import GraphDef, Session + +def extract_sub_graph(graph_def: GraphDef, dest_nodes: Iterable[str]) -> GraphDef: ... +def convert_variables_to_constants( + sess: Session, + input_graph_def: GraphDef, + output_node_names: Iterable[str], + variable_names_whitelist: Iterable[str] | None = None, + variable_names_blacklist: Iterable[str] | None = None, +) -> GraphDef: ... diff --git a/tensorflow/compat/v1/saved_model/__init__.pyi b/tensorflow/compat/v1/saved_model/__init__.pyi new file mode 100644 index 00000000..38981bbf --- /dev/null +++ b/tensorflow/compat/v1/saved_model/__init__.pyi @@ -0,0 +1,17 @@ +from typing import Mapping + +import tensorflow as tf +from tensorflow.compat.v1 import Session +from tensorflow.compat.v1.saved_model import loader as loader +from tensorflow.compat.v1.saved_model import tag_constants as tag_constants +from tensorflow.compat.v1.saved_model.builder import SavedModelBuilder + +Builder = SavedModelBuilder + +def simple_save( + session: Session, + export_dir: str, + inputs: Mapping[str, tf.Tensor], + outputs: Mapping[str, tf.Tensor], + legacy_init_op: tf.Operation | None = None, +) -> None: ... diff --git a/tensorflow/compat/v1/saved_model/builder.pyi b/tensorflow/compat/v1/saved_model/builder.pyi new file mode 100644 index 00000000..aafb8164 --- /dev/null +++ b/tensorflow/compat/v1/saved_model/builder.pyi @@ -0,0 +1,3 @@ +class SavedModelBuilder: + def __init__(self, export_dir: str) -> None: ... + def save(self, as_text: bool = False) -> str: ... 
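+# Illustrative sketch, not part of the stubs: the v1 saving flow these saved_model
+# stubs describe (inp and out are hypothetical tensors in the default graph):
+#
+#     with tf.compat.v1.Session() as sess:
+#         sess.run(tf.compat.v1.global_variables_initializer())
+#         tf.compat.v1.saved_model.simple_save(
+#             sess, "/tmp/model", inputs={"x": inp}, outputs={"y": out}
+#         )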
diff --git a/tensorflow/compat/v1/saved_model/loader.pyi b/tensorflow/compat/v1/saved_model/loader.pyi new file mode 100644 index 00000000..cbd7d1f0 --- /dev/null +++ b/tensorflow/compat/v1/saved_model/loader.pyi @@ -0,0 +1,7 @@ +from typing import Iterable + +from tensorflow.compat.v1 import MetaGraphDef, Session + +def load( + sess: Session, tags: Iterable[str], export_dir: str, import_scope: str | None = None, **saver_kwargs +) -> MetaGraphDef: ... diff --git a/tensorflow/compat/v1/saved_model/tag_constants.pyi b/tensorflow/compat/v1/saved_model/tag_constants.pyi new file mode 100644 index 00000000..99fd750e --- /dev/null +++ b/tensorflow/compat/v1/saved_model/tag_constants.pyi @@ -0,0 +1,4 @@ +from tensorflow.saved_model import GPU as GPU +from tensorflow.saved_model import SERVING as SERVING +from tensorflow.saved_model import TPU as TPU +from tensorflow.saved_model import TRAINING as TRAINING diff --git a/tensorflow/config/__init__.pyi b/tensorflow/config/__init__.pyi new file mode 100644 index 00000000..e69de29b diff --git a/tensorflow/config/experimental.pyi b/tensorflow/config/experimental.pyi new file mode 100644 index 00000000..58713b99 --- /dev/null +++ b/tensorflow/config/experimental.pyi @@ -0,0 +1 @@ +def enable_op_determinism() -> None: ... diff --git a/tensorflow/data/__init__.pyi b/tensorflow/data/__init__.pyi new file mode 100644 index 00000000..ef11989f --- /dev/null +++ b/tensorflow/data/__init__.pyi @@ -0,0 +1,63 @@ +from typing import Any, Callable, Generic, Iterator, Literal, Sequence, TypeVar +from typing_extensions import Self + +import numpy as np + +from tensorflow import Tensor, TensorCompatibleT, TypeSpec +from tensorflow.data import experimental as experimental + +from bento.utils.tensor_types import ContainerGeneric, ScalarTensorCompatible, TensorCompatible + +_T1 = TypeVar("_T1", covariant=True) +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") + +class Dataset(Generic[_T1]): + element_spec: ContainerGeneric[TypeSpec] + def apply(self: Dataset[_T1], transformation_func: Callable[[Dataset[_T1]], Dataset[_T2]]) -> Dataset[_T2]: ... + def as_numpy_iterator(self) -> Iterator[np.ndarray[Any, Any]]: ... + def batch( + self: Self, + batch_size: ScalarTensorCompatible, + drop_remainder: bool = False, + num_parallel_calls: int | None = None, + deterministic: bool | None = None, + name: str | None = None, + ) -> Self: ... + def cache(self: Self, filename: str = "", name: str | None = None) -> Self: ... + @classmethod + def from_tensor_slices( + cls, tensors: Sequence[TensorCompatibleT] | TensorCompatibleT, name: str | None = None + ) -> Dataset[TensorCompatibleT]: ... + def __iter__(self) -> Iterator[_T1]: ... + def map( + self: Dataset[_T1], + map_func: Callable[[_T1], _T2], + num_parallel_calls: int | None = None, + deterministic: None | bool = None, + name: str | None = None, + ) -> Dataset[_T2]: ... + def prefetch(self: Self, buffer_size: int, name: str | None = None) -> Self: ... + def reduce(self, initial_state: _T2, reduce_func: Callable[[_T2, _T1], _T2], name: str | None = None) -> _T2: ... + def repeat(self: Self, count: int | None = None, name: str | None = None) -> Self: ... + def shard(self: Self, num_shards: int, index: int, name: str | None = None) -> Self: ... + def shuffle( + self: Self, + buffer_size: int, + seed: int | None = None, + reshuffle_each_iteration: bool = True, + name: str | None = None, + ) -> Self: ... + def take(self: Self, count: int, name: str | None = None) -> Self: ... 
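+    # Illustrative, not part of the stubs: the element type parameter is meant to
+    # flow through transformations (a deliberate simplification of the runtime
+    # element types):
+    #
+    #     ds = Dataset.from_tensor_slices([1.0, 2.0, 3.0])  # Dataset[float]
+    #     pairs = ds.map(lambda x: (x, x))                  # Dataset[tuple[float, float]]
+    #     batched = pairs.batch(2)                          # batch/shuffle/take return Self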
+ @staticmethod + def zip(datasets: tuple[Dataset[_T2], Dataset[_T3]], name: str | None = None) -> Dataset[tuple[_T2, _T3]]: ... + +class TFRecordDataset(Dataset[Tensor]): + def __init__( + self, + filenames: TensorCompatible | Dataset[str], + compression_type: Literal["", "ZLIB", "GZIP"] | None = None, + buffer_size: int | None = None, + num_parallel_reads: int | None = None, + name: str | None = None, + ) -> None: ... diff --git a/tensorflow/data/experimental.pyi b/tensorflow/data/experimental.pyi new file mode 100644 index 00000000..c08a43ea --- /dev/null +++ b/tensorflow/data/experimental.pyi @@ -0,0 +1,20 @@ +from typing import Callable, TypeVar + +from tensorflow.data import Dataset + +AUTOTUNE: int +INFINITE_CARDINALITY: int +SHARD_HINT: int +UNKNOWN_CARDINALITY: int + +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") + +def parallel_interleave( + map_func: Callable[[_T1], Dataset[_T2]], + cycle_length: int, + block_length: int = 1, + sloppy: bool | None = False, + buffer_output_elements: int | None = None, + prefetch_input_elements: int | None = None, +) -> Callable[[Dataset[_T1]], Dataset[_T2]]: ... diff --git a/tensorflow/distribute.pyi b/tensorflow/distribute.pyi new file mode 100644 index 00000000..31839fb7 --- /dev/null +++ b/tensorflow/distribute.pyi @@ -0,0 +1,39 @@ +from typing import Generic, Iterator, NamedTuple, TypeVar + +from enum import Enum + +from tensorflow import TypeSpec + +from bento.utils.tensor_types import ContainerGeneric + +class InputContext: + def __init__( + self, num_input_pipelines: int = 1, input_pipeline_id: int = 0, num_replicas_in_sync: int = 1 + ) -> None: ... + @property + def num_input_pipelines(self) -> int: ... + @property + def input_pipeline_id(self) -> int: ... + @property + def num_replicas_in_sync(self) -> int: ... + def get_per_replica_batch_size(self, global_batch_size: int) -> int: ... + +class InputReplicationMode(Enum): + PER_WORKER = "PER_WORKER" + PER_REPLICA = "PER_REPLICA" + +class InputOptions(NamedTuple): + experimental_fetch_to_device: bool | None = None + experimental_replication_mode: InputReplicationMode = InputReplicationMode.PER_WORKER + experimental_place_dataset_on_device: bool = False + experimental_per_replica_buffer_size: int = 1 + +_T1 = TypeVar("_T1", covariant=True) + +class DistributedIterator(Generic[_T1]): + element_spec: ContainerGeneric[TypeSpec] + def __iter__(self) -> Iterator[_T1]: ... + +class DistributedDataset(Generic[_T1]): + element_spec: ContainerGeneric[TypeSpec] + def __iter__(self) -> DistributedIterator[_T1]: ... diff --git a/tensorflow/dtypes.pyi b/tensorflow/dtypes.pyi new file mode 100644 index 00000000..df5b171c --- /dev/null +++ b/tensorflow/dtypes.pyi @@ -0,0 +1,54 @@ +from typing import Any + +from builtins import bool as _bool + +import numpy as np + +from tensorflow import DTypeLike + +# If we want to handle tensors as generic on dtypes we likely need to make +# this class an Enum. That's a minor lie type wise, but Literals only work +# with basic types + enums. +class DType: + @property + def name(self) -> str: ... + @property + def as_numpy_dtype(self) -> type[np.number[Any]]: ... + @property + def is_numpy_compatible(self) -> _bool: ... + @property + def is_bool(self) -> _bool: ... + @property + def is_floating(self) -> _bool: ... + @property + def is_integer(self) -> _bool: ... + @property + def is_quantized(self) -> _bool: ... + @property + def is_unsigned(self) -> _bool: ... + +bool: DType = ... +complex128: DType = ... +complex64: DType = ... +bfloat16: DType = ... +float16: DType = ... 
+half: DType = ...
+float32: DType = ...
+float64: DType = ...
+double: DType = ...
+int8: DType = ...
+int16: DType = ...
+int32: DType = ...
+int64: DType = ...
+uint8: DType = ...
+uint16: DType = ...
+uint32: DType = ...
+uint64: DType = ...
+qint8: DType = ...
+qint16: DType = ...
+qint32: DType = ...
+quint8: DType = ...
+quint16: DType = ...
+string: DType = ...
+
+def as_dtype(dtype: DTypeLike) -> DType: ...
diff --git a/tensorflow/errors.pyi b/tensorflow/errors.pyi
new file mode 100644
index 00000000..e27843e5
--- /dev/null
+++ b/tensorflow/errors.pyi
@@ -0,0 +1,3 @@
+from typing import Any
+
+def __getattr__(name: str) -> Any: ...
diff --git a/tensorflow/estimator/__init__.pyi b/tensorflow/estimator/__init__.pyi
new file mode 100644
index 00000000..25c09e02
--- /dev/null
+++ b/tensorflow/estimator/__init__.pyi
@@ -0,0 +1,150 @@
+from typing import Any, Callable, Generic, Iterable, Literal, Mapping, Protocol, Sequence, TypeVar
+
+from os import PathLike
+
+import tensorflow as tf
+from tensorflow.compat.v1 import Session
+from tensorflow.estimator import export as export
+from tensorflow.estimator.export import ExportOutput
+
+from bento.utilities.typehints.recursive_types import ContainerGeneric
+from bento.utils.tensor_types import TensorCompatible
+
+_FeaturesT = TypeVar("_FeaturesT", bound=tf.Tensor | Mapping[str, tf.Tensor], contravariant=True)
+_LabelsT = TypeVar("_LabelsT", bound=tf.Tensor | Mapping[str, tf.Tensor], contravariant=True)
+
+# The duplication below is an artifact of there being no nice way to indicate that
+# an argument may optionally be present. There are 8 possible cases, and we
+# currently cover 4 of them (the full version plus 3 reduced ones used in the
+# codebase). More cases can be added as needed.
+class ModelFn1(Generic[_FeaturesT, _LabelsT], Protocol):
+    def __call__(
+        self,
+        features: _FeaturesT,
+        labels: _LabelsT | None,
+        *,
+        mode: ModeKeysT,
+        config: RunConfig,
+        params: dict[str, object],
+    ) -> EstimatorSpec: ...
+
+class ModelFn2(Generic[_FeaturesT, _LabelsT], Protocol):
+    def __call__(
+        self,
+        features: _FeaturesT,
+        labels: _LabelsT | None,
+        *,
+        mode: ModeKeysT,
+        params: dict[str, object],
+    ) -> EstimatorSpec: ...
+
+class ModelFn3(Generic[_FeaturesT, _LabelsT], Protocol):
+    def __call__(
+        self,
+        features: _FeaturesT,
+        labels: _LabelsT | None,
+        *,
+        mode: ModeKeysT,
+    ) -> EstimatorSpec: ...
+
+class ModelFn4(Generic[_FeaturesT, _LabelsT], Protocol):
+    def __call__(
+        self,
+        features: _FeaturesT,
+        labels: _LabelsT | None,
+    ) -> EstimatorSpec: ...
+
+ModelFn = (
+    ModelFn1[_FeaturesT, _LabelsT]
+    | ModelFn2[_FeaturesT, _LabelsT]
+    | ModelFn3[_FeaturesT, _LabelsT]
+    | ModelFn4[_FeaturesT, _LabelsT]
+)
+
+class CheckpointSaverListener: ...
+class SessionRunHook: ...
+
+class Estimator(Generic[_FeaturesT, _LabelsT]):
+    def __init__(
+        self,
+        model_fn: ModelFn[_FeaturesT, _LabelsT],
+        model_dir: PathLike[str] | str | None = None,
+        config: RunConfig | None = None,
+        params: Mapping[str, object] | None = None,
+        warm_start_from: str | WarmStartSettings | None = None,
+    ) -> None: ...
+    def _add_meta_graph_for_mode(
+        self,
+        builder: tf.compat.v1.saved_model.Builder,
+        input_receiver_fn_map: dict[ModeKeysT, Callable[[], tf.estimator.export.ServingInputReceiver]],
+        checkpoint_path: str,
+        save_variables: bool = True,
+        mode: ModeKeysT = ModeKeys.PREDICT,
+        export_tags: Iterable[str] | None = None,
+        check_variables: bool = True,
+        strip_default_attrs: bool = True,
+    ): ...
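+    # Illustrative, not part of the stubs: a minimal model_fn matching the ModelFn3
+    # protocol above (my_model_fn and the path are hypothetical):
+    #
+    #     def my_model_fn(features: tf.Tensor, labels: tf.Tensor | None, *, mode: ModeKeysT) -> EstimatorSpec:
+    #         return EstimatorSpec(mode, loss=tf.reduce_mean(features))
+    #
+    #     estimator = Estimator(my_model_fn, model_dir="/tmp/estimator")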
+    def train(
+        self,
+        input_fn: Callable[[], tf.data.Dataset[tuple[_FeaturesT, _LabelsT]]],
+        hooks: Sequence[SessionRunHook] | None = None,
+        steps: int | None = None,
+        max_steps: int | None = None,
+        saving_listeners: Sequence[CheckpointSaverListener] | None = None,
+    ): ...
+    def __getattr__(self, name: str) -> Any: ...
+
+class RunConfig:
+    def __getattr__(self, name: str) -> Any: ...
+
+class WarmStartSettings:
+    def __getattr__(self, name: str) -> Any: ...
+
+class EstimatorSpec:
+    def __init__(
+        self,
+        mode: ModeKeysT,
+        predictions: tf.Tensor | Mapping[str, tf.Tensor] | None = None,
+        loss: tf.Tensor | None = None,
+        train_op: tf.Operation | None = None,
+        export_outputs: Mapping[str, ExportOutput] | None = None,
+        training_chief_hooks: Iterable[SessionRunHook] | None = None,
+        training_hooks: Iterable[SessionRunHook] | None = None,
+        evaluation_hooks: Iterable[SessionRunHook] | None = None,
+        prediction_hooks: Iterable[SessionRunHook] | None = None,
+    ) -> None: ...
+    def __getattr__(self, name: str) -> Any: ...
+
+class SessionRunContext:
+    def __init__(self, original_args: SessionRunArgs, session: Session) -> None: ...
+    @property
+    def original_args(self) -> SessionRunArgs: ...
+    @property
+    def session(self) -> Session: ...
+    @property
+    def stop_requested(self) -> bool: ...
+    def request_stop(self) -> None: ...
+
+class SessionRunValues: ...
+
+FetchT = tf.Tensor | tf.Operation | tf.SparseTensor | str
+
+class SessionRunArgs:
+    def __init__(
+        self,
+        fetches: ContainerGeneric[FetchT],
+        feed_dict: Mapping[tf.Tensor, TensorCompatible] | None = None,
+    ) -> None: ...
+    @property
+    def fetches(self) -> ContainerGeneric[FetchT]: ...
+    @property
+    def feed_dict(self) -> Mapping[tf.Tensor, TensorCompatible]: ...
+
+class ModeKeys:
+    TRAIN = "train"
+    EVAL = "eval"
+    PREDICT = "infer"
+
+ModeKeysT = Literal["train", "eval", "infer"]
+
+def __getattr__(name: str) -> Any: ...
diff --git a/tensorflow/estimator/export.pyi b/tensorflow/estimator/export.pyi
new file mode 100644
index 00000000..b35852db
--- /dev/null
+++ b/tensorflow/estimator/export.pyi
@@ -0,0 +1,14 @@
+from typing import Any, Generic, TypeVar
+
+from tensorflow import Tensor
+
+class ServingInputReceiver:
+    def __getattr__(self, name: str) -> Any: ...
+
+_T = TypeVar("_T", bound=Tensor | dict[str, Tensor])
+
+class ExportOutput: ...
+
+class PredictOutput(Generic[_T], ExportOutput):
+    outputs: _T
+    def __init__(self, outputs: _T) -> None: ...
diff --git a/tensorflow/graph_util.pyi b/tensorflow/graph_util.pyi
new file mode 100644
index 00000000..78f06222
--- /dev/null
+++ b/tensorflow/graph_util.pyi
@@ -0,0 +1,26 @@
+from typing import Mapping, overload
+
+from tensorflow import Operation, Tensor
+from tensorflow.compat.v1 import GraphDef
+
+@overload
+def import_graph_def(
+    graph_def: GraphDef,
+    input_map: Mapping[str, Tensor] | None,
+    return_elements: list[str],
+    name: str | None = None,
+) -> list[Operation | Tensor]: ...
+@overload
+def import_graph_def(
+    graph_def: GraphDef,
+    *,
+    return_elements: list[str],
+    name: str | None = None,
+) -> list[Operation | Tensor]: ...
+@overload
+def import_graph_def(
+    graph_def: GraphDef,
+    input_map: Mapping[str, Tensor] | None = None,
+    return_elements: None = None,
+    name: str | None = None,
+) -> None: ...
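+# Illustrative, not part of the stubs (gd and the element name are hypothetical):
+# with return_elements the requested ops/tensors are returned; without it the
+# graph is imported purely for its side effect:
+#
+#     [out] = import_graph_def(gd, return_elements=["output:0"])  # list[Operation | Tensor]
+#     import_graph_def(gd)  # -> None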
diff --git a/tensorflow/initializers.pyi b/tensorflow/initializers.pyi
new file mode 100644
index 00000000..3caa7d18
--- /dev/null
+++ b/tensorflow/initializers.pyi
@@ -0,0 +1 @@
+from tensorflow.keras.initializers import *
diff --git a/tensorflow/io.pyi b/tensorflow/io.pyi
new file mode 100644
index 00000000..e27843e5
--- /dev/null
+++ b/tensorflow/io.pyi
@@ -0,0 +1,3 @@
+from typing import Any
+
+def __getattr__(name: str) -> Any: ...
diff --git a/tensorflow/keras/__init__.pyi b/tensorflow/keras/__init__.pyi
new file mode 100644
index 00000000..4c2f32c0
--- /dev/null
+++ b/tensorflow/keras/__init__.pyi
@@ -0,0 +1,217 @@
+from typing import Any, Callable, Container, Generic, Iterable, Iterator, Literal, Mapping, Sequence, TypeVar, overload
+
+from os import PathLike
+
+import numpy as np
+
+import tensorflow as tf
+from tensorflow.data import Dataset
+from tensorflow.keras import activations as activations
+from tensorflow.keras import applications as applications
+from tensorflow.keras import callbacks as callbacks
+from tensorflow.keras import constraints as constraints
+from tensorflow.keras import initializers as initializers
+from tensorflow.keras import layers as layers
+from tensorflow.keras import losses as losses
+from tensorflow.keras import metrics as metrics
+from tensorflow.keras import models as models
+from tensorflow.keras import optimizers as optimizers
+from tensorflow.keras import regularizers as regularizers
+from tensorflow.keras import utils as utils
+from tensorflow.keras.callbacks import Callback, CallbackList, History
+from tensorflow.keras.layers import InputSpec, Layer, _InputT  # type: ignore
+from tensorflow.keras.utils import Sequence as KerasSequence
+from tensorflow.keras.utils.experimental import DatasetCreator
+from tensorflow.saved_model import SaveOptions
+from tensorflow.types.experimental import ConcreteFunction, GenericFunction
+
+from bento.utils.tensor_types import ContainerGeneric, DTypeLike, ShapeLike, TensorCompatible, any_array, float_array
+
+_Loss = str | tf.keras.losses.Loss | Callable[[TensorCompatible, TensorCompatible], tf.Tensor]
+_Metric = str | tf.keras.metrics.Metric | Callable[[TensorCompatible, TensorCompatible], tf.Tensor] | None
+
+# Models are invariant, unlike layers, due to fit/evaluate.
+_OutputT = TypeVar("_OutputT")
+_Verbosity = Literal["auto", 0, 1, 2]
+_ValidationDataT = (
+    ContainerGeneric[any_array]
+    | tuple[_InputT, _OutputT]
+    | tuple[_InputT, _OutputT, tf.Tensor]
+    | Dataset[tuple[_InputT, _OutputT]]
+    | Dataset[tuple[_InputT, _OutputT, tf.Tensor]]
+    | Sequence[tuple[_InputT, _OutputT]]
+    | Sequence[tuple[_InputT, _OutputT, tf.Tensor]]
+    | None
+)
+
+class _LossesContainer(Generic[_OutputT]):
+    def __call__(
+        self,
+        y_true: _OutputT,
+        y_pred: _OutputT,
+        sample_weight: tf.Tensor | _OutputT | None = None,
+        regularization_losses: Iterable[tf.Tensor] | None = None,
+    ): ...
+
+class Model(Layer[_InputT, _OutputT]):
+    # Ideally loss/metrics/output would share
+    # the same structure, but higher-kinded types
+    # are not supported.
+    def compile(
+        self,
+        optimizer: str | tf.optimizers.Optimizer,
+        loss: ContainerGeneric[_Loss],
+        metrics: ContainerGeneric[Sequence[_Metric]] | None = None,
+        loss_weights: ContainerGeneric[float] | None = None,
+        weighted_metrics: ContainerGeneric[str] | None = None,
+        run_eagerly: bool = False,
+        steps_per_execution: int = 1,
+        jit_compile: bool = False,
+    ) -> None: ...
+ def compute_loss( + self, + x: _InputT | None = None, + y: _OutputT | None = None, + y_pred: _OutputT | None = None, + sample_weight: tf.Tensor | _OutputT | None = None, + ): ... + # The overloads mainly capture relationship between first argument + # and the rest. There are other relationships between the arguments + # that could be captured with more overloads. Fully specifying all + # relationships is likely 10-20 overloads. + @overload + def fit( + self, + x: ContainerGeneric[any_array], + y: ContainerGeneric[any_array], + *, + batch_size: int | None = None, + epochs: int = 1, + verbose: _Verbosity = "auto", + callbacks: CallbackList | Sequence[Callback] | None = None, + validation_split: float = 0.0, + validation_data: _ValidationDataT[_InputT, _OutputT] = None, + shuffle: bool = True, + class_weight: Mapping[int, float] | None = None, + sample_weight: float_array | None = None, + initial_epoch: int = 0, + steps_per_epoch: int | None = None, + validation_steps: int | None = None, + validation_batch_size: int | None = None, + validation_freq: int | Container[int] = 1, + ) -> History: ... + @overload + def fit( + self, + x: _InputT, + y: _OutputT, + *, + batch_size: int | None = None, + epochs: int = 1, + verbose: _Verbosity = "auto", + callbacks: CallbackList | Sequence[Callback] | None = None, + validation_split: float = 0.0, + validation_data: _ValidationDataT[_InputT, _OutputT] = None, + shuffle: bool = True, + class_weight: Mapping[int, float] | None = None, + sample_weight: float_array | None = None, + initial_epoch: int = 0, + steps_per_epoch: int | None = None, + validation_steps: int | None = None, + validation_batch_size: int | None = None, + validation_freq: int | Container[int] = 1, + ) -> History: ... + @overload + def fit( + self, + # Third generic argument is optional sample weights. + # It can either be one tensor shared for all outputs + # or a container of tensors matching the output structure. + x: Dataset[tuple[_InputT, _OutputT]] + | Dataset[tuple[_InputT, _OutputT, tf.Tensor]] + | Dataset[tuple[_InputT, _OutputT, _OutputT]] + | Iterator[tuple[_InputT, _OutputT]] + | Iterator[tuple[_InputT, _OutputT, tf.Tensor]] + | Iterator[tuple[_InputT, _OutputT, _OutputT]] + | KerasSequence[tuple[_InputT, _OutputT]] + | KerasSequence[tuple[_InputT, _OutputT, tf.Tensor]] + | KerasSequence[tuple[_InputT, _OutputT, _OutputT]] + | DatasetCreator[tuple[_InputT, _OutputT]] + | DatasetCreator[tuple[_InputT, _OutputT, tf.Tensor]] + | DatasetCreator[tuple[_InputT, _OutputT, _OutputT]], + *, + epochs: int = 1, + verbose: _Verbosity = "auto", + callbacks: CallbackList | Sequence[Callback] | None = None, + validation_data: _ValidationDataT[_InputT, _OutputT] = None, + class_weight: Mapping[int, float] | None = None, + initial_epoch: int = 0, + steps_per_epoch: int | None = None, + validation_steps: int | None = None, + validation_batch_size: int | None = None, + validation_freq: int | Container[int] = 1, + max_queue_size: int = 10, + workers: int = 1, + use_multiprocessing: bool = False, + ) -> History: ... + # Fix type Any later + def predict( + self, + x: Any, + batch_size: int | None = None, + verbose: str = "auto", + steps: int | None = None, + callbacks: CallbackList | Sequence[Callback] | None = None, + max_queue_size: int = 10, + workers: int = 1, + use_multiprocessing: bool = False, + ) -> Any: ... + def call(self, inputs: _InputT) -> _OutputT: ... + def compute_output_shape(self, input_shape: Any) -> Any: ... 
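+    # Illustrative, not part of the stubs (model, x_train, y_train, train_ds are
+    # hypothetical): the fit overloads above accept array pairs or datasets of tuples:
+    #
+    #     model.fit(x_train, y_train, epochs=2, validation_split=0.1)
+    #     model.fit(train_ds, epochs=2)  # train_ds: Dataset[tuple[inputs, outputs]]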
+    def save(
+        self,
+        filepath: str | PathLike[str],
+        overwrite: bool = True,
+        include_optimizer: bool = True,
+        save_format: Literal["h5", "tf", None] = None,
+        signatures: ConcreteFunction[..., object]
+        | GenericFunction[..., object]
+        | Mapping[str, ConcreteFunction[..., object] | GenericFunction[..., object]]
+        | None = None,
+        options: SaveOptions | None = None,
+        save_traces: bool = True,
+    ) -> None: ...
+    def compiled_loss(
+        self,
+        y: _OutputT,
+        y_pred: _OutputT,
+        sample_weight: tf.Tensor | _OutputT,
+        regularization_losses: Sequence[tf.Tensor],
+    ) -> tf.Tensor: ...
+
+@overload
+def Input(
+    *,
+    name: str | None = None,
+    type_spec: tf.TypeSpec | None = None,
+) -> tf.Tensor: ...
+@overload
+def Input(
+    shape: ShapeLike | None = None,
+    batch_size: int | None = None,
+    name: str | None = None,
+    dtype: DTypeLike | None = None,
+    sparse: bool | None = None,
+    tensor: tf.Tensor | None = None,
+    ragged: bool | None = None,
+) -> tf.Tensor: ...
+
+_T = TypeVar("_T", bound=Layer[tf.Tensor, tf.Tensor])
+
+class Sequential(Model[tf.Tensor, tf.Tensor], Generic[_T]):
+    input_spec: InputSpec
+    layers: list[_T]
+    def __init__(self, layers: Sequence[_T] | None = None, name: str | None = None) -> None: ...
+    def compute_output_shape(self, input_shape: tf.TensorShape) -> tf.TensorShape: ...
+    def call(self, inputs: tf.Tensor, training: bool | None = None) -> tf.Tensor: ...
diff --git a/tensorflow/keras/activations.pyi b/tensorflow/keras/activations.pyi
new file mode 100644
index 00000000..b3a1cff1
--- /dev/null
+++ b/tensorflow/keras/activations.pyi
@@ -0,0 +1,7 @@
+from typing import Any, Callable, Dict
+
+from tensorflow import Tensor
+
+ActivationT = str | None | Callable[[Tensor], Tensor] | Dict[str, Any]
+
+def get(identifier: ActivationT) -> Callable[[Tensor], Tensor]: ...
diff --git a/tensorflow/keras/applications.pyi b/tensorflow/keras/applications.pyi
new file mode 100644
index 00000000..e69de29b
diff --git a/tensorflow/keras/callbacks.pyi b/tensorflow/keras/callbacks.pyi
new file mode 100644
index 00000000..9ac911b3
--- /dev/null
+++ b/tensorflow/keras/callbacks.pyi
@@ -0,0 +1,100 @@
+from typing import Any, Iterable, Literal, Mapping, Sequence
+
+from tensorflow.keras import Model
+from tensorflow.keras.optimizers.schedules import LearningRateSchedule
+
+class Callback:
+    model: Model[Any, object]
+    params: dict[str, Any]
+    def set_model(self, model: Model[Any, object]) -> None: ...
+    def set_params(self, params: dict[str, Any]) -> None: ...
+    def on_batch_begin(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_batch_end(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_epoch_begin(self, epoch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_epoch_end(self, epoch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_predict_batch_begin(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_predict_batch_end(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_predict_begin(self, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_predict_end(self, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_test_batch_begin(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_test_batch_end(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_test_begin(self, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_test_end(self, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_train_batch_begin(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_train_batch_end(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_train_begin(self, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_train_end(self, logs: Mapping[str, object] | None = None) -> None: ...
+
+# A CallbackList has the exact same API as a Callback,
+# but does not actually subclass it.
+class CallbackList:
+    def __init__(
+        self,
+        callbacks: Sequence[Callback] | None = None,
+        add_history: bool = False,
+        add_progbar: bool = False,
+        model: Model[object, object] | None = None,
+        **params: Any,
+    ) -> None: ...
+    def set_model(self, model: Model[object, object]) -> None: ...
+    def set_params(self, params: dict[str, Any]) -> None: ...
+    def on_batch_begin(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_batch_end(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_epoch_begin(self, epoch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_epoch_end(self, epoch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_predict_batch_begin(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_predict_batch_end(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_predict_begin(self, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_predict_end(self, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_test_batch_begin(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_test_batch_end(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_test_begin(self, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_test_end(self, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_train_batch_begin(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_train_batch_end(self, batch: int, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_train_begin(self, logs: Mapping[str, object] | None = None) -> None: ...
+    def on_train_end(self, logs: Mapping[str, object] | None = None) -> None: ...
+
+class History(Callback):
+    history: dict[str, list[Any]]
+
+class ModelCheckpoint(Callback):
+    monitor_op: Any
+
+class TensorBoard(Callback):
+    _start_batch: int
+    _stop_batch: int
+    def __init__(
+        self,
+        log_dir: str = "logs",
+        histogram_freq: int = 0,
+        write_graph: bool = True,
+        write_images: bool = False,
+        write_steps_per_second: bool = False,
+        update_freq: str | int = "epoch",
+        profile_batch: int | tuple[int, int] = 0,
+        embeddings_freq: int = 0,
+        embeddings_metadata: dict[str, None] | None = None,
+    ) -> None: ...
+
+class BackupAndRestore(Callback):
+    def __init__(self, backup_dir: str) -> None: ...
+
+class BaseLogger(Callback):
+    def __init__(self, stateful_metrics: Iterable[str] | None = None) -> None: ...
+
+class CSVLogger(Callback):
+    def __init__(self, filename: str, separator: str = ",", append: bool = False) -> None: ...
+
+class EarlyStopping(Callback):
+    monitor_op: Any
+
+class LearningRateScheduler(Callback):
+    def __init__(self, schedule: LearningRateSchedule, verbose: Literal[0, 1] = 0) -> None: ...
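+# Illustrative, not part of the stubs: a user callback overrides only the hooks it
+# needs (LossPrinter is hypothetical):
+#
+#     class LossPrinter(Callback):
+#         def on_epoch_end(self, epoch: int, logs: Mapping[str, object] | None = None) -> None:
+#             if logs is not None:
+#                 print(epoch, logs.get("loss"))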
+
+class ProgbarLogger(Callback):
+    use_steps: bool
+    def __init__(
+        self, count_mode: Literal["steps", "samples"] = "samples", stateful_metrics: Iterable[str] | None = None
+    ) -> None: ...
diff --git a/tensorflow/keras/constraints.pyi b/tensorflow/keras/constraints.pyi
new file mode 100644
index 00000000..ec61be55
--- /dev/null
+++ b/tensorflow/keras/constraints.pyi
@@ -0,0 +1,16 @@
+from typing import Any, Callable, overload
+
+from tensorflow import Tensor
+
+class Constraint:
+    def get_config(self) -> dict[str, Any]: ...
+    def __call__(self, w: Tensor, /) -> Tensor: ...
+
+ConstraintT = str | dict[str, Any] | Constraint | None
+
+@overload
+def get(identifier: None) -> None: ...
+@overload
+def get(identifier: Callable[[Tensor], Tensor]) -> Callable[[Tensor], Tensor]: ...
+@overload
+def get(identifier: str | dict[str, Any]) -> Constraint: ...
diff --git a/tensorflow/keras/initializers.pyi b/tensorflow/keras/initializers.pyi
new file mode 100644
index 00000000..2b4a0437
--- /dev/null
+++ b/tensorflow/keras/initializers.pyi
@@ -0,0 +1,42 @@
+# TODO: Fill in remaining initializers
+from typing import Any, overload
+from typing_extensions import Self
+
+from tensorflow import DTypeLike, ShapeLike, Tensor, TensorCompatible
+
+from bento.arguments import Config
+
+class Initializer:
+    def __call__(self, shape: ShapeLike, dtype: DTypeLike | None = None) -> Tensor: ...
+    def get_config(self) -> dict[str, Config]: ...
+    @classmethod
+    def from_config(cls: type[Self], config: dict[str, Config]) -> Self: ...
+
+class Constant(Initializer):
+    def __init__(self, value: TensorCompatible) -> None: ...
+
+class GlorotNormal(Initializer):
+    def __init__(self, seed: int | None = None) -> None: ...
+
+class GlorotUniform(Initializer):
+    def __init__(self, seed: int | None = None) -> None: ...
+
+class TruncatedNormal(Initializer):
+    def __init__(self, mean: TensorCompatible = 0.0, stddev: TensorCompatible = 0.05, seed: int | None = None) -> None: ...
+
+class Zeros(Initializer):
+    def __init__(self) -> None: ...
+
+constant = Constant
+glorot_normal = GlorotNormal
+glorot_uniform = GlorotUniform
+truncated_normal = TruncatedNormal
+zeros = Zeros
+
+InitializerT = str | Initializer | dict[str, Any] | None
+
+@overload
+def get(identifier: None) -> None: ...
+@overload
+def get(identifier: str | Initializer | dict[str, Any]) -> Initializer: ...
+def __getattr__(name: str) -> Any: ...
diff --git a/tensorflow/keras/layers.pyi b/tensorflow/keras/layers.pyi
new file mode 100644
index 00000000..5b033087
--- /dev/null
+++ b/tensorflow/keras/layers.pyi
@@ -0,0 +1,131 @@
+from typing import Any, Callable, Generic, Iterable, Sequence, TypeVar, final, overload
+from typing_extensions import Self
+
+from abc import ABC, abstractmethod
+
+import numpy as np
+
+import tensorflow as tf
+from tensorflow import DTypeLike, Tensor, Variable
+from tensorflow.keras.activations import ActivationT
+from tensorflow.keras.constraints import ConstraintT
+from tensorflow.keras.initializers import InitializerT
+from tensorflow.keras.regularizers import RegularizerT
+
+from bento.utils.tensor_types import ContainerInputSpec, TensorCompatible
+
+_InputT = TypeVar("_InputT", contravariant=True)
+_OutputT = TypeVar("_OutputT", covariant=True)
+
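The `get()` overloads in initializers.pyi above encode Keras's identifier-dispatch convention; a sketch against the real runtime:

    import tensorflow as tf

    init = tf.keras.initializers.get("glorot_uniform")   # str overload -> Initializer
    kernel = init(shape=(3, 4), dtype=tf.float32)        # Initializer.__call__ -> Tensor
    assert tf.keras.initializers.get(None) is None       # None overload propagates None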
+class InputSpec:
+    dtype: str | None
+    shape: tuple[int | None, ...]
+    ndim: int | None
+    max_ndim: int | None
+    min_ndim: int | None
+    axes: dict[int, int | None] | None
+    def __init__(
+        self,
+        dtype: tf.DTypeLike | None = None,
+        shape: Iterable[int | None] | None = None,
+        ndim: int | None = None,
+        max_ndim: int | None = None,
+        min_ndim: int | None = None,
+        axes: dict[int, int | None] | None = None,
+        allow_last_axis_squeeze: bool = False,
+    ) -> None: ...
+
+class Layer(Generic[_InputT, _OutputT], tf.Module, ABC):
+    input_spec: ContainerInputSpec
+    trainable: bool
+    def __init__(
+        self, trainable: bool = True, name: str | None = None, dtype: DTypeLike | None = None, dynamic: bool = False
+    ) -> None: ...
+    @final
+    def __call__(self, inputs: _InputT, *, training: bool = False) -> _OutputT: ...
+    @overload
+    def build(self: Layer[tf.Tensor, object], input_shape: tf.TensorShape) -> None: ...
+    @overload
+    def build(self, input_shape: Any) -> None: ...
+    # The real types here would be _InputShapeT and _OutputShapeT. If higher-kinded
+    # types existed, we could derive them from the input and output types. Without
+    # them this class would need additional generic parameters, so the overloads
+    # below at least handle the most common case.
+    @overload
+    @abstractmethod
+    def compute_output_shape(self: Layer[tf.Tensor, tf.Tensor], input_shape: tf.TensorShape) -> tf.TensorShape: ...
+    @overload
+    @abstractmethod
+    def compute_output_shape(self, input_shape: Any) -> Any: ...
+    def add_weight(
+        self,
+        name: str | None = None,
+        shape: Iterable[int | None] | None = None,
+        dtype: tf.DTypeLike | None = None,
+        initializer: InitializerT | None = None,
+        regularizer: tf.keras.regularizers.Regularizer | Callable[[tf.Tensor], tf.Tensor] | None = None,
+        constraint: tf.keras.constraints.Constraint | Callable[[tf.Tensor], tf.Tensor] | None = None,
+        trainable: bool | None = None,
+    ) -> tf.Variable: ...
+    @abstractmethod
+    def call(self, inputs: _InputT) -> _OutputT: ...
+    def count_params(self) -> int: ...
+    @property
+    def trainable_variables(self) -> list[Variable]: ...
+    @property
+    def non_trainable_variables(self) -> list[Variable]: ...
+    @property
+    def trainable_weights(self) -> list[Variable]: ...
+    @property
+    def non_trainable_weights(self) -> list[Variable]: ...
+    @property
+    def losses(self) -> list[Tensor]: ...
+    built: bool
+    def get_weights(self) -> list[np.ndarray[Any, np.dtype[np.float32]]]: ...
+    def set_weights(self, weights: list[np.ndarray[Any, np.dtype[np.float32]]]) -> None: ...
+    def get_config(self) -> dict[str, Any]: ...
+    @classmethod
+    def from_config(cls: type[Self], config: dict[str, Any]) -> Self: ...
+
+class Dense(Layer[tf.Tensor, tf.Tensor]):
+    input_spec: InputSpec
+    def __init__(
+        self,
+        units: int,
+        activation: ActivationT = None,
+        use_bias: bool = True,
+        kernel_initializer: InitializerT = "glorot_uniform",
+        bias_initializer: InitializerT = "zeros",
+        kernel_regularizer: RegularizerT = None,
+        bias_regularizer: RegularizerT = None,
+        activity_regularizer: RegularizerT = None,
+        kernel_constraint: ConstraintT = None,
+        bias_constraint: ConstraintT = None,
+        name: str | None = None,
+    ) -> None: ...
+    def compute_output_shape(self, input_shape: tf.TensorShape) -> tf.TensorShape: ...
+    def call(self, inputs: tf.Tensor) -> tf.Tensor: ...
+
+class ReLU(Layer[tf.Tensor, tf.Tensor]):
+    def __init__(
+        self,
+        max_value: float | None = None,
+        negative_slope: float = 0.0,
+        threshold: float = 0.0,
+        name: str | None = None,
+    ) -> None: ...
+    def compute_output_shape(self, input_shape: tf.TensorShape) -> tf.TensorShape: ...
+    def call(self, inputs: tf.Tensor) -> tf.Tensor: ...
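A sketch of the Layer[_InputT, _OutputT] contract above, assuming the real runtime (`Scale` is a hypothetical layer): call() implements the computation, and the final __call__ dispatches to it.

    import tensorflow as tf

    class Scale(tf.keras.layers.Layer):  # a Layer[tf.Tensor, tf.Tensor] under these stubs
        def __init__(self, factor: float) -> None:
            super().__init__()
            self.factor = factor

        def call(self, inputs: tf.Tensor) -> tf.Tensor:
            return inputs * self.factor

        def compute_output_shape(self, input_shape: tf.TensorShape) -> tf.TensorShape:
            return input_shape  # an elementwise op preserves shape

    y = Scale(2.0)(tf.constant([1.0, 2.0]))  # __call__ routes through call()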
+
+class Dropout(Layer[tf.Tensor, tf.Tensor]):
+    def __init__(
+        self,
+        rate: float,
+        noise_shape: TensorCompatible | Sequence[int | None] | None = None,
+        seed: int | None = None,
+        name: str | None = None,
+    ) -> None: ...
+    def compute_output_shape(self, input_shape: tf.TensorShape) -> tf.TensorShape: ...
+    def call(self, inputs: tf.Tensor) -> tf.Tensor: ...
+
+def __getattr__(name: str) -> Any: ...
diff --git a/tensorflow/keras/losses.pyi b/tensorflow/keras/losses.pyi
new file mode 100644
index 00000000..b6ecfb06
--- /dev/null
+++ b/tensorflow/keras/losses.pyi
@@ -0,0 +1,42 @@
+from typing import Any, Literal
+from typing_extensions import Self, TypeGuard
+
+from abc import ABC, abstractmethod
+
+from tensorflow import Tensor
+
+from bento.utils.tensor_types import TensorCompatible
+
+class Loss(ABC):
+    reduction: _ReductionValues
+    name: str | None
+    def __init__(self, reduction: _ReductionValues = "auto", name: str | None = None) -> None: ...
+    @abstractmethod
+    def call(self, y_true: Tensor, y_pred: Tensor) -> Tensor: ...
+    @classmethod
+    def from_config(cls: type[Self], config: dict[str, Any]) -> Self: ...
+    def get_config(self) -> dict[str, Any]: ...
+    def __call__(
+        self, y_true: TensorCompatible, y_pred: TensorCompatible, sample_weight: TensorCompatible | None = None
+    ) -> Tensor: ...
+
+class BinaryCrossentropy(Loss):
+    def call(self, y_true: Tensor, y_pred: Tensor) -> Tensor: ...
+
+class MeanSquaredError(Loss):
+    def call(self, y_true: Tensor, y_pred: Tensor) -> Tensor: ...
+
+class MeanSquaredLogarithmicError(Loss):
+    def call(self, y_true: Tensor, y_pred: Tensor) -> Tensor: ...
+
+class Reduction:
+    AUTO = "auto"
+    NONE = "none"
+    SUM = "sum"
+    SUM_OVER_BATCH_SIZE = "sum_over_batch_size"
+    @classmethod
+    def all(cls) -> tuple[_ReductionValues, ...]: ...
+    @classmethod
+    def validate(cls, key: object) -> TypeGuard[_ReductionValues]: ...
+
+_ReductionValues = Literal["auto", "none", "sum", "sum_over_batch_size"]
diff --git a/tensorflow/keras/metrics.pyi b/tensorflow/keras/metrics.pyi
new file mode 100644
index 00000000..f2d2b252
--- /dev/null
+++ b/tensorflow/keras/metrics.pyi
@@ -0,0 +1,25 @@
+from typing import Any, Iterable, TypeVar
+from typing_extensions import Self
+
+from abc import ABC, abstractmethod
+
+import tensorflow as tf
+from tensorflow import Operation, Tensor
+from tensorflow.keras.layers import Layer
+
+_OutputT = TypeVar("_OutputT", bound=Tensor | dict[str, Tensor])
+
+class Metric(Layer[Tensor, _OutputT], ABC):
+    def merge_state(self, metrics: Iterable[Self]) -> list[Operation]: ...
+    def reset_state(self) -> None: ...
+    @abstractmethod
+    def update_state(self) -> Operation: ...
+    @abstractmethod
+    def result(self) -> _OutputT: ...
+    def call(self, inputs: Tensor) -> _OutputT: ...
+
+class AUC(Metric[tf.Tensor]):
+    def __init__(self, num_thresholds: int = 200, name: str | None = None) -> None: ...
+    def update_state(self) -> Operation: ...
+    def result(self) -> tf.Tensor: ...
+    def compute_output_shape(self, input_shape: Any) -> Any: ...
diff --git a/tensorflow/keras/models.pyi b/tensorflow/keras/models.pyi
new file mode 100644
index 00000000..9324ad0b
--- /dev/null
+++ b/tensorflow/keras/models.pyi
@@ -0,0 +1,8 @@
+from typing import Any, Mapping
+
+from tensorflow.keras import Model as Model
+
+# TODO: Replace Any with a more precise model type.
+def load_model(
+    filepath: str, custom_objects: Mapping[str, object] | None = None, compile: bool = True, options: Any = None
+) -> Any: ...
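A round trip through the save()/load_model() signatures above (a sketch, assuming the real runtime; the path is arbitrary, and load_model deliberately returns Any here):

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    model.build((None, 4))  # give the model a known input shape before saving
    model.save("/tmp/my_model", save_format="tf")
    restored = tf.keras.models.load_model("/tmp/my_model")  # typed as Any above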
diff --git a/tensorflow/keras/optimizers/__init__.pyi b/tensorflow/keras/optimizers/__init__.pyi new file mode 100644 index 00000000..cc0a2ad8 --- /dev/null +++ b/tensorflow/keras/optimizers/__init__.pyi @@ -0,0 +1,71 @@ +from __future__ import annotations + +from typing import Any, Callable, Iterable +from typing_extensions import Self, TypeAlias + +from abc import ABC + +import tensorflow as tf +from tensorflow.keras.optimizers import schedules as schedules + +_InitializerT = str | Callable[[], tf.Tensor] | dict[str, Any] +_ShapeT: TypeAlias = tf.TensorShape | Iterable[int | None] +_DtypeT: TypeAlias = tf.DType | str | None +_GradientsT: TypeAlias = tf.Tensor | tf.IndexedSlices + +class Optimizer(ABC): + @property + def name(self) -> str: ... + def __init__(self, *args: object, **kwargs: object) -> None: ... + def add_slot( + self, + var: tf.Variable, + slot_name: str, + initializer: _InitializerT = "zeros", + shape: tf.TensorShape | None = None, + ) -> tf.Variable: ... + def add_weight( + self, + name: str, + shape: _ShapeT, + dtype: _DtypeT = None, + trainable: None | bool = None, + synchronization: tf.VariableSynchronization = tf.VariableSynchronization.AUTO, + aggregation: tf.VariableAggregation = tf.VariableAggregation.NONE, + ) -> tf.Variable: ... + def apply_gradients( + self, + grads_and_vars: Iterable[tuple[_GradientsT, tf.Variable]], + name: str | None = None, + experimental_aggregate_gradients: bool = True, + ) -> tf.Operation: ... + @classmethod + def from_config(cls: type[Self], config: dict[str, Any], custom_objects: dict[str, type] | None = None) -> Self: ... + def get_config(self) -> dict[str, Any]: ... + def get_slot(self, var: tf.Variable, slot_name: str) -> tf.Variable: ... + def get_slot_names(self) -> list[str]: ... + def get_gradients(self, loss: tf.Tensor, params: list[tf.Variable]) -> list[tf.Tensor]: ... + def minimize( + self, + loss: tf.Tensor | Callable[[], tf.Tensor], + var_list: list[tf.Variable] + | tuple[tf.Variable, ...] + | Callable[[], list[tf.Variable] | tuple[tf.Variable, ...]], + grad_loss: tf.Tensor | None = None, + name: str | None = None, + tape: tf.GradientTape | None = None, + ) -> tf.Operation: ... + def variables(self) -> list[tf.Variable]: ... + +class Adam(Optimizer): + def __init__( + self, + learning_rate: float = 0.001, + beta_1: float = 0.9, + beta_2: float = 0.999, + epsilon: float = 1e-07, + amsgrad: bool = False, + name: str | None = "Adam", + ) -> None: ... + +class Adagrad(Optimizer): ... diff --git a/tensorflow/keras/optimizers/schedules.pyi b/tensorflow/keras/optimizers/schedules.pyi new file mode 100644 index 00000000..ab410226 --- /dev/null +++ b/tensorflow/keras/optimizers/schedules.pyi @@ -0,0 +1,97 @@ +from typing import Any, Sequence +from typing_extensions import Self + +from abc import ABC, abstractmethod + +import tensorflow as tf + +class LearningRateSchedule(ABC): + @abstractmethod + def __call__(self, step: int | tf.Tensor) -> float | tf.Tensor: ... + @abstractmethod + def get_config(self) -> dict[str, Any]: ... + @classmethod + def from_config(cls: type[Self], config: dict[str, Any]) -> Self: ... + +class PiecewiseConstantDecay(LearningRateSchedule): + def __init__( + self, + boundaries: Sequence[tf.Tensor] | Sequence[float], + values: Sequence[float] | Sequence[tf.Tensor], + name: str | None = None, + ) -> None: ... + def __call__(self, step: int | tf.Tensor) -> float | tf.Tensor: ... + def get_config(self) -> dict[str, Any]: ... + @classmethod + def from_config(cls: type[Self], config: dict[str, Any]) -> Self: ... 
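A sketch of the Optimizer.apply_gradients contract above, assuming the real runtime: gradients are paired with their variables as an iterable of tuples.

    import tensorflow as tf

    opt = tf.keras.optimizers.Adam(learning_rate=0.001)
    var = tf.Variable(1.0)
    with tf.GradientTape() as tape:
        loss = tf.square(var)
    grads = tape.gradient(loss, [var])
    opt.apply_gradients(zip(grads, [var]))  # Iterable[tuple[gradient, variable]]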
+
+class InverseTimeDecay(LearningRateSchedule):
+    def __init__(
+        self,
+        initial_learning_rate: float | tf.Tensor,
+        decay_steps: int,
+        decay_rate: float,
+        staircase: bool = False,
+        name: str | None = None,
+    ) -> None: ...
+    def __call__(self, step: int | tf.Tensor) -> float | tf.Tensor: ...
+    def get_config(self) -> dict[str, Any]: ...
+    @classmethod
+    def from_config(cls: type[Self], config: dict[str, Any]) -> Self: ...
+
+class PolynomialDecay(LearningRateSchedule):
+    def __init__(
+        self,
+        initial_learning_rate: float | tf.Tensor,
+        decay_steps: int,
+        end_learning_rate: float | tf.Tensor,
+        power: float = 1.0,
+        cycle: bool = False,
+        name: str | None = None,
+    ) -> None: ...
+    def __call__(self, step: int | tf.Tensor) -> float | tf.Tensor: ...
+    def get_config(self) -> dict[str, Any]: ...
+    @classmethod
+    def from_config(cls: type[Self], config: dict[str, Any]) -> Self: ...
+
+class CosineDecay(LearningRateSchedule):
+    def __init__(
+        self,
+        initial_learning_rate: float | tf.Tensor,
+        decay_steps: int,
+        alpha: float | tf.Tensor = 0.0,
+        name: str | None = None,
+    ) -> None: ...
+    def __call__(self, step: int | tf.Tensor) -> float | tf.Tensor: ...
+    def get_config(self) -> dict[str, Any]: ...
+    @classmethod
+    def from_config(cls: type[Self], config: dict[str, Any]) -> Self: ...
+
+class CosineDecayRestarts(LearningRateSchedule):
+    def __init__(
+        self,
+        initial_learning_rate: float | tf.Tensor,
+        decay_steps: int | tf.Tensor,
+        t_mul: float | tf.Tensor = 1.0,
+        m_mul: float | tf.Tensor = 1.0,
+        alpha: float | tf.Tensor = 0.0,
+        name: str | None = None,
+    ) -> None: ...
+    def __call__(self, step: int | tf.Tensor) -> float | tf.Tensor: ...
+    def get_config(self) -> dict[str, Any]: ...
+    @classmethod
+    def from_config(cls: type[Self], config: dict[str, Any]) -> Self: ...
+
+class ExponentialDecay(LearningRateSchedule):
+    def __init__(
+        self,
+        initial_learning_rate: float | tf.Tensor,
+        decay_steps: int | tf.Tensor,
+        decay_rate: float | tf.Tensor,
+        staircase: bool = False,
+        name: str | None = None,
+    ) -> None: ...
+    def __call__(self, step: int | tf.Tensor) -> float | tf.Tensor: ...
+    def get_config(self) -> dict[str, Any]: ...
+    @classmethod
+    def from_config(cls: type[Self], config: dict[str, Any]) -> Self: ...
diff --git a/tensorflow/keras/regularizers.pyi b/tensorflow/keras/regularizers.pyi
new file mode 100644
index 00000000..732016de
--- /dev/null
+++ b/tensorflow/keras/regularizers.pyi
@@ -0,0 +1,20 @@
+from typing import Any, Callable, overload
+from typing_extensions import Self
+
+from tensorflow import Tensor
+
+class Regularizer:
+    def get_config(self) -> dict[str, Any]: ...
+    @classmethod
+    def from_config(cls, config: dict[str, Any]) -> Self: ...
+    def __call__(self, x: Tensor, /) -> Tensor: ...
+
+RegularizerT = str | dict[str, Any] | Regularizer | None
+
+@overload
+def get(identifier: None) -> None: ...
+@overload
+def get(identifier: Callable[[Tensor], Tensor]) -> Callable[[Tensor], Tensor]: ...
+@overload
+def get(identifier: str | dict[str, Any]) -> Regularizer: ...
+def __getattr__(name: str) -> Any: ...
diff --git a/tensorflow/keras/utils/__init__.pyi b/tensorflow/keras/utils/__init__.pyi
new file mode 100644
index 00000000..29d6e50e
--- /dev/null
+++ b/tensorflow/keras/utils/__init__.pyi
@@ -0,0 +1,15 @@
+from typing import Generic, Iterator, TypeVar
+
+from abc import ABC, abstractmethod
+
+_T1 = TypeVar("_T1", covariant=True)
+
+class Sequence(Generic[_T1], ABC):
+    def on_epoch_end(self) -> None: ...
+ @abstractmethod + def __getitem__(self, index: int) -> _T1: ... + @abstractmethod + def __len__(self) -> int: ... + def __iter__(self) -> Iterator[_T1]: ... + +def set_random_seed(seed: int) -> None: ... diff --git a/tensorflow/keras/utils/experimental.pyi b/tensorflow/keras/utils/experimental.pyi new file mode 100644 index 00000000..aa7db496 --- /dev/null +++ b/tensorflow/keras/utils/experimental.pyi @@ -0,0 +1,12 @@ +from typing import Callable, Generic, TypeVar + +from tensorflow.data import Dataset +from tensorflow.distribute import InputContext, InputOptions + +_T1 = TypeVar("_T1") + +class DatasetCreator(Generic[_T1]): + def __init__( + self, dataset_fn: Callable[[InputContext], Dataset[_T1]], input_options: InputOptions | None = None + ): ... + def __call__(self, input_context: InputContext) -> Dataset[_T1]: ... diff --git a/tensorflow/linalg.pyi b/tensorflow/linalg.pyi new file mode 100644 index 00000000..628c86ec --- /dev/null +++ b/tensorflow/linalg.pyi @@ -0,0 +1,34 @@ +from typing import overload + +from builtins import bool as _bool + +from tensorflow import RaggedTensor, Tensor, TensorCompatible + +from bento.utils.tensor_types import DTypeLike + +@overload +def matmul( + a: TensorCompatible, + b: TensorCompatible, + transpose_a: _bool = False, + transpose_b: _bool = False, + adjoint_a: _bool = False, + adjoint_b: _bool = False, + a_is_sparse: _bool = False, + b_is_sparse: _bool = False, + output_type: DTypeLike | None = None, + name: str | None = None, +) -> Tensor: ... +@overload +def matmul( + a: RaggedTensor, + b: RaggedTensor, + transpose_a: _bool = False, + transpose_b: _bool = False, + adjoint_a: _bool = False, + adjoint_b: _bool = False, + a_is_sparse: _bool = False, + b_is_sparse: _bool = False, + output_type: DTypeLike | None = None, + name: str | None = None, +) -> RaggedTensor: ... diff --git a/tensorflow/lookup.pyi b/tensorflow/lookup.pyi new file mode 100644 index 00000000..7ebfcc16 --- /dev/null +++ b/tensorflow/lookup.pyi @@ -0,0 +1,68 @@ +from typing import Any, overload + +import tensorflow as tf +from tensorflow import RaggedTensor, ScalarTensorCompatible, Tensor, TensorCompatible +from tensorflow.sparse import SparseTensor + +from bento.utils.tensor_types import DTypeLike + +class TableInitializerBase: + def __init__( + self, + key_dtype: DTypeLike | None = None, + value_dtype: DTypeLike | None = None, + ) -> None: ... + @property + def key_dtype(self) -> tf.DType: ... + @property + def value_dtype(self) -> tf.DType: ... + +class KeyValueTensorInitializer(TableInitializerBase): + # Only rank 1 is supported. + def __init__( + self, + keys: TensorCompatible, + values: TensorCompatible, + key_dtype: DTypeLike | None = None, + value_dtype: DTypeLike | None = None, + name: str | None = None, + ) -> None: ... + +class StaticHashTable: + def __init__( + self, initializer: TableInitializerBase, default_value: ScalarTensorCompatible, name: str | None = None + ) -> None: ... + def export(self, name: str | None = None) -> tuple[Tensor, Tensor]: ... + def size(self, name: str | None = None) -> int: ... + @overload + def lookup(self, keys: TensorCompatible, name: str | None = None) -> Tensor: ... + @overload + def lookup(self, keys: SparseTensor, name: str | None = None) -> SparseTensor: ... + @property + def name(self) -> str: ... + def __getitem__(self, keys: TensorCompatible) -> Tensor: ... 
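A sketch of the StaticHashTable signatures above, assuming the real runtime: the lookup overloads preserve the container type of the keys.

    import tensorflow as tf

    table = tf.lookup.StaticHashTable(
        tf.lookup.KeyValueTensorInitializer(
            keys=tf.constant(["a", "b"]), values=tf.constant([1, 2], dtype=tf.int64)
        ),
        default_value=tf.constant(-1, dtype=tf.int64),
    )
    hits = table.lookup(tf.constant(["a", "z"]))  # dense Tensor in, dense Tensor out: [1, -1]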
+ +class StaticVocabularyTable: + def __init__( + self, + initializer: TableInitializerBase | None, + num_oov_buckets: int, + lookup_key_dtype: DTypeLike | None = None, + name: str | None = None, + experimental_is_anonymous: bool = False, + ) -> None: ... + @overload + def lookup(self, keys: TensorCompatible, name: str | None = None) -> Tensor: ... + @overload + def lookup(self, keys: SparseTensor, name: str | None = None) -> SparseTensor: ... + @overload + def lookup(self, keys: RaggedTensor, name: str | None = None) -> RaggedTensor: ... + @overload + def __getitem__(self, keys: TensorCompatible) -> Tensor: ... + @overload + def __getitem__(self, keys: SparseTensor) -> SparseTensor: ... + @overload + def __getitem__(self, keys: RaggedTensor) -> RaggedTensor: ... + def size(self, name: str | None = None) -> int: ... + +def __getattr__(name: str) -> Any: ... diff --git a/tensorflow/losses.pyi b/tensorflow/losses.pyi new file mode 100644 index 00000000..ec085147 --- /dev/null +++ b/tensorflow/losses.pyi @@ -0,0 +1 @@ +from tensorflow.keras.losses import Loss as Loss diff --git a/tensorflow/math.pyi b/tensorflow/math.pyi new file mode 100644 index 00000000..82b490a1 --- /dev/null +++ b/tensorflow/math.pyi @@ -0,0 +1,230 @@ +from typing import Iterable, overload + +import tensorflow as tf +from tensorflow import IndexedSlices, RaggedTensor, ShapeLike, Tensor, TensorCompatible, TensorCompatibleT +from tensorflow.sparse import SparseTensor + +from bento.utils.tensor_types import DTypeLike, RaggedTensorLikeT, SparseTensorCompatible + +# The documentation for tf.equal is a lie. It claims to support sparse tensors, but crashes on them. +# Whether an operation supports sparse tensors is poorly documented and needs to be verified +# manually. Most operations do not support sparse tensors. +@overload +def abs(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def abs(x: SparseTensor, name: str | None = None) -> SparseTensor: ... +@overload +def abs(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +@overload +def sin(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def sin(x: RaggedTensorLikeT, name: str | None = None) -> RaggedTensorLikeT: ... +@overload +def cos(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def cos(x: RaggedTensorLikeT, name: str | None = None) -> RaggedTensorLikeT: ... +@overload +def exp(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def exp(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +@overload +def sinh(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def sinh(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +@overload +def cosh(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def cosh(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +@overload +def tanh(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def tanh(x: SparseTensor, name: str | None = None) -> SparseTensor: ... +@overload +def tanh(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +def expm1(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def log(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def log(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +@overload +def log1p(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def log1p(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... 
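The overloads above mirror how these ops preserve sparse and ragged structure; a sketch assuming the real runtime (tf.ragged.constant exists in TensorFlow but is not stubbed in this patch):

    import tensorflow as tf

    st = tf.sparse.from_dense(tf.constant([[0.0, -2.0], [3.0, 0.0]]))
    tf.abs(st)   # SparseTensor in, SparseTensor out

    rt = tf.ragged.constant([[1.0, -4.0], [9.0]])
    tf.sqrt(tf.abs(rt))  # RaggedTensor in, RaggedTensor out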
+@overload +def negative(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def negative(x: SparseTensor, name: str | None = None) -> SparseTensor: ... +@overload +def negative(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +def sigmoid(x: TensorCompatible, name: str | None = None) -> Tensor: ... +def add(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def add_n(x: Iterable[TensorCompatible | IndexedSlices], name: str | None = None) -> Tensor: ... +@overload +def add_n(x: Iterable[RaggedTensor], name: str | None = None) -> RaggedTensor: ... +@overload +def subtract(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def subtract(x: TensorCompatible | RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +@overload +def subtract( + x: TensorCompatible | RaggedTensor, y: TensorCompatible | RaggedTensor, name: str | None = None +) -> Tensor | RaggedTensor: ... +@overload +def multiply(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def multiply(x: RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +def multiply_no_nan(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +def divide(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +def divide_no_nan(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def floormod(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def floormod(x: RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +@overload +def ceil(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def ceil(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +@overload +def floor(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def floor(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +def accumulate_n( + inputs: list[TensorCompatibleT] | tuple[TensorCompatibleT, ...], shape: ShapeLike | None = None +) -> Tensor: ... +@overload +def pow(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def pow(x: RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +@overload +def reciprocal(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def reciprocal(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +@overload +def is_nan(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def is_nan(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +@overload +def minimum(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def minimum(x: tf.RaggedTensor, y: TensorCompatible | tf.RaggedTensor, name: str | None = None) -> tf.RaggedTensor: ... +@overload +def minimum(x: TensorCompatible | tf.RaggedTensor, y: tf.RaggedTensor, name: str | None = None) -> tf.RaggedTensor: ... +@overload +def maximum(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def maximum(x: tf.RaggedTensor, y: TensorCompatible | tf.RaggedTensor, name: str | None = None) -> tf.RaggedTensor: ... +@overload +def maximum(x: TensorCompatible | tf.RaggedTensor, y: tf.RaggedTensor, name: str | None = None) -> tf.RaggedTensor: ... 
+@overload +def logical_not(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def logical_not(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +def logical_and(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def logical_or(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def logical_or(x: tf.RaggedTensor, y: tf.RaggedTensor, name: str | None = None) -> tf.RaggedTensor: ... +def logical_xor(x: TensorCompatible, y: TensorCompatible, name: str | None = "LogicalXor") -> Tensor: ... +@overload +def equal(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def equal(x: RaggedTensor, y: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ... +@overload +def not_equal(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def not_equal(x: RaggedTensor, y: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ... +def greater(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +def greater_equal(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +def less(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +def less_equal(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ... +def segment_sum(data: TensorCompatible, segment_ids: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def sign(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def sign(x: SparseTensor, name: str | None = None) -> SparseTensor: ... +@overload +def sqrt(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def sqrt(x: SparseTensor, name: str | None = None) -> SparseTensor: ... +@overload +def sqrt(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +def rsqrt(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def square(x: TensorCompatible, name: str | None = None) -> Tensor: ... +@overload +def square(x: SparseTensor, name: str | None = None) -> SparseTensor: ... +@overload +def square(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ... +def softplus(features: TensorCompatible, name: str | None = None) -> Tensor: ... + +# Depending on the method axis is either a rank 0 tensor or a rank 0/1 tensor. +def reduce_mean( + input_tensor: TensorCompatible, + axis: TensorCompatible | None = None, + keepdims: bool = False, + name: str | None = None, +) -> Tensor: ... +def reduce_sum( + input_tensor: TensorCompatible, + axis: TensorCompatible | None = None, + keepdims: bool = False, + name: str | None = None, +) -> Tensor: ... +def reduce_max( + input_tensor: TensorCompatible, + axis: TensorCompatible | None = None, + keepdims: bool = False, + name: str | None = None, +) -> Tensor: ... +def reduce_min( + input_tensor: TensorCompatible, + axis: TensorCompatible | None = None, + keepdims: bool = False, + name: str | None = None, +) -> Tensor: ... +def reduce_prod( + input_tensor: TensorCompatible, + axis: TensorCompatible | None = None, + keepdims: bool = False, + name: str | None = None, +) -> Tensor: ... +def reduce_std( + input_tensor: TensorCompatible, + axis: TensorCompatible | None = None, + keepdims: bool = False, + name: str | None = None, +) -> Tensor: ... 
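A sketch of the reduction signatures above, assuming the real runtime: axis=None collapses every dimension, an int or list of ints selects dimensions, and keepdims retains them with size 1.

    import tensorflow as tf

    x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    tf.math.reduce_mean(x)                             # rank-0 Tensor: 2.5
    tf.math.reduce_mean(x, axis=0)                     # shape (2,): [2.0, 3.0]
    tf.math.reduce_sum(x, axis=[0, 1], keepdims=True)  # shape (1, 1): [[10.0]]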
+def argmax(
+    input: TensorCompatible,
+    axis: TensorCompatible | None = None,
+    output_type: DTypeLike = tf.int32,
+    name: str | None = None,
+) -> Tensor: ...
+def argmin(
+    input: TensorCompatible,
+    axis: TensorCompatible | None = None,
+    output_type: DTypeLike = tf.int32,
+    name: str | None = None,
+) -> Tensor: ...
+
+# Only for bool tensors.
+def reduce_any(
+    input_tensor: TensorCompatible | RaggedTensor,
+    axis: TensorCompatible | None = None,
+    keepdims: bool = False,
+    name: str | None = None,
+) -> Tensor: ...
+def reduce_all(
+    input_tensor: TensorCompatible | RaggedTensor,
+    axis: TensorCompatible | None = None,
+    keepdims: bool = False,
+    name: str | None = None,
+) -> Tensor: ...
+def count_nonzero(
+    input: SparseTensorCompatible,
+    axis: TensorCompatible | None = None,
+    keepdims: bool | None = None,
+    dtype: DTypeLike = tf.dtypes.int64,
+    name: str | None = None,
+) -> Tensor: ...
diff --git a/tensorflow/metrics.pyi b/tensorflow/metrics.pyi
new file mode 100644
index 00000000..a3ad1a52
--- /dev/null
+++ b/tensorflow/metrics.pyi
@@ -0,0 +1 @@
+from tensorflow.keras.metrics import Metric as Metric
diff --git a/tensorflow/nn.pyi b/tensorflow/nn.pyi
new file mode 100644
index 00000000..b61601eb
--- /dev/null
+++ b/tensorflow/nn.pyi
@@ -0,0 +1,31 @@
+from tensorflow import RaggedTensor, ScalarTensorCompatible, SparseTensorCompatible, Tensor, TensorCompatible
+from tensorflow.math import sigmoid as sigmoid
+from tensorflow.math import tanh as tanh
+
+def relu(features: TensorCompatible, name: str | None = None) -> Tensor: ...
+def leaky_relu(features: TensorCompatible, alpha: float = 0.2, name: str | None = None) -> Tensor: ...
+def log_poisson_loss(
+    targets: TensorCompatible, log_input: TensorCompatible, compute_full_loss: bool = False, name: str | None = None
+) -> Tensor: ...
+def sigmoid_cross_entropy_with_logits(
+    labels: TensorCompatible, logits: TensorCompatible, name: str | None = None
+) -> Tensor: ...
+def softmax(
+    logits: TensorCompatible, axis: ScalarTensorCompatible | None = None, name: str | None = None
+) -> Tensor: ...
+def selu(features: TensorCompatible, name: str | None = None) -> Tensor: ...
+def embedding_lookup(
+    params: TensorCompatible, ids: TensorCompatible, max_norm: float | None = None, name: str | None = None
+) -> Tensor: ...
+def safe_embedding_lookup_sparse(
+    embedding_weights: Tensor | list[Tensor],
+    sparse_ids: SparseTensorCompatible,
+    sparse_weights: SparseTensorCompatible | None = None,
+    combiner: str = "mean",
+    default_id: ScalarTensorCompatible | None = None,
+    max_norm: float | None = None,
+    name: str | None = None,
+) -> Tensor: ...
+def moments(
+    x: TensorCompatible | RaggedTensor, axes: TensorCompatible, keepdims: bool = False, name: str | None = None
+) -> tuple[Tensor, Tensor]: ...
diff --git a/tensorflow/optimizers/__init__.pyi b/tensorflow/optimizers/__init__.pyi
new file mode 100644
index 00000000..8eadb7c7
--- /dev/null
+++ b/tensorflow/optimizers/__init__.pyi
@@ -0,0 +1,5 @@
+from typing import Any
+
+from tensorflow.keras.optimizers import *
+
+def __getattr__(name: str) -> Any: ...
diff --git a/tensorflow/python/__init__.pyi b/tensorflow/python/__init__.pyi
new file mode 100644
index 00000000..e27843e5
--- /dev/null
+++ b/tensorflow/python/__init__.pyi
@@ -0,0 +1,3 @@
+from typing import Any
+
+def __getattr__(name: str) -> Any: ...
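A sketch of the sigmoid_cross_entropy_with_logits signature above, assuming the real runtime: labels and logits share a shape, and the loss is elementwise, so it is usually reduced afterwards.

    import tensorflow as tf

    labels = tf.constant([[1.0, 0.0]])
    logits = tf.constant([[2.0, -1.0]])
    per_element = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
    mean_loss = tf.math.reduce_mean(per_element)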
diff --git a/tensorflow/python/framework/__init__.pyi b/tensorflow/python/framework/__init__.pyi new file mode 100644 index 00000000..e69de29b diff --git a/tensorflow/python/framework/test_util.pyi b/tensorflow/python/framework/test_util.pyi new file mode 100644 index 00000000..5fc37c2f --- /dev/null +++ b/tensorflow/python/framework/test_util.pyi @@ -0,0 +1,9 @@ +from typing import Callable, Type, TypeVar +from typing_extensions import ParamSpec + +P = ParamSpec("P") +R = TypeVar("R") +T = TypeVar("T") + +def run_all_in_graph_and_eager_modes(cls: Type[T]) -> Type[T]: ... +def run_in_graph_and_eager_modes(func: Callable[P, R]) -> Callable[P, R]: ... diff --git a/tensorflow/python/keras/__init__.pyi b/tensorflow/python/keras/__init__.pyi new file mode 100644 index 00000000..e69de29b diff --git a/tensorflow/python/keras/utils/__init__.pyi b/tensorflow/python/keras/utils/__init__.pyi new file mode 100644 index 00000000..e69de29b diff --git a/tensorflow/python/keras/utils/generic_utils.pyi b/tensorflow/python/keras/utils/generic_utils.pyi new file mode 100644 index 00000000..a9711357 --- /dev/null +++ b/tensorflow/python/keras/utils/generic_utils.pyi @@ -0,0 +1 @@ +def to_snake_case(name: str) -> str: ... diff --git a/tensorflow/python/layers/__init__.pyi b/tensorflow/python/layers/__init__.pyi new file mode 100644 index 00000000..e27843e5 --- /dev/null +++ b/tensorflow/python/layers/__init__.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name: str) -> Any: ... diff --git a/tensorflow/python/layers/base.pyi b/tensorflow/python/layers/base.pyi new file mode 100644 index 00000000..5ce083c1 --- /dev/null +++ b/tensorflow/python/layers/base.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name: str) -> Any: ... # type: ignore diff --git a/tensorflow/python/training/__init__.pyi b/tensorflow/python/training/__init__.pyi new file mode 100644 index 00000000..e69de29b diff --git a/tensorflow/python/training/tracking/__init__.pyi b/tensorflow/python/training/tracking/__init__.pyi new file mode 100644 index 00000000..e69de29b diff --git a/tensorflow/python/training/tracking/autotrackable.pyi b/tensorflow/python/training/tracking/autotrackable.pyi new file mode 100644 index 00000000..eedd1958 --- /dev/null +++ b/tensorflow/python/training/tracking/autotrackable.pyi @@ -0,0 +1 @@ +class AutoTrackable: ... diff --git a/tensorflow/ragged.pyi b/tensorflow/ragged.pyi new file mode 100644 index 00000000..1c601439 --- /dev/null +++ b/tensorflow/ragged.pyi @@ -0,0 +1,8 @@ +from typing import Any, Sequence + +from tensorflow import RaggedTensor, ScalarTensorCompatible, TensorCompatible + +def stack( + values: Sequence[TensorCompatible | RaggedTensor], axis: ScalarTensorCompatible = 0, name: str | None = None +) -> RaggedTensor: ... +def __getattr__(name: str) -> Any: ... diff --git a/tensorflow/random.pyi b/tensorflow/random.pyi new file mode 100644 index 00000000..aeba0666 --- /dev/null +++ b/tensorflow/random.pyi @@ -0,0 +1,40 @@ +import tensorflow as tf + +from bento.utils.tensor_types import DTypeLike, ScalarTensorCompatible, ShapeLike + +def uniform( + shape: ShapeLike, + minval: ScalarTensorCompatible = 0.0, + maxval: ScalarTensorCompatible | None = None, + dtype: DTypeLike = tf.float32, + seed: int | None = None, + name: str | None = None, +) -> tf.Tensor: ... +def normal( + shape: ShapeLike, + mean: ScalarTensorCompatible = 0.0, + stddev: ScalarTensorCompatible = 1.0, + dtype: DTypeLike = tf.float32, + seed: int | None = None, + name: str | None = None, +) -> tf.Tensor: ... 
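A sketch of the sampling signatures above, assuming the real runtime: shape is ShapeLike, and seed pins only the per-op seed.

    import tensorflow as tf

    u = tf.random.uniform((2, 3), minval=0.0, maxval=1.0, seed=42)
    n = tf.random.normal((2, 3), mean=0.0, stddev=1.0)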
+def truncated_normal( + shape: ShapeLike, + mean: ScalarTensorCompatible = 0.0, + stddev: ScalarTensorCompatible = 1.0, + dtype: DTypeLike = tf.float32, + seed: int | None = None, + name: str | None = None, +) -> tf.Tensor: ... +def poisson( + shape: ShapeLike, + lam: ScalarTensorCompatible = 1.0, + dtype: DTypeLike = tf.float32, + seed: int | None = None, + name: str | None = None, +) -> tf.Tensor: ... +def shuffle( + value: tf.Tensor, + seed: int | None = None, + name: str | None = None, +) -> tf.Tensor: ... diff --git a/tensorflow/saved_model/__init__.pyi b/tensorflow/saved_model/__init__.pyi new file mode 100644 index 00000000..2f4f3af9 --- /dev/null +++ b/tensorflow/saved_model/__init__.pyi @@ -0,0 +1,65 @@ +from typing import Any, Generic, Mapping, Sequence, TypeVar +from typing_extensions import ParamSpec + +import tensorflow as tf +from tensorflow.python.training.tracking.autotrackable import AutoTrackable +from tensorflow.saved_model.experimental import VariablePolicy +from tensorflow.types.experimental import ConcreteFunction, GenericFunction + +_P = ParamSpec("_P") +_R = TypeVar("_R", covariant=True) + +class SaveOptions: + __slots__ = ( + "namespace_whitelist", + "save_debug_info", + "function_aliases", + "experimental_io_device", + "experimental_variable_policy", + "experimental_custom_gradients", + ) + namespace_whitelist: list[str] + save_debug_info: bool + function_aliases: dict[str, tf.types.experimental.GenericFunction[..., object]] + experimental_io_device: str + experimental_variable_policy: VariablePolicy + experimental_custom_gradients: bool + def __init__( + self, + namespace_whitelist: list[str] | None = None, + save_debug_info: bool = False, + function_aliases: dict[str, tf.types.experimental.GenericFunction[..., object]] | None = None, + experimental_io_device: str | None = None, + experimental_variable_policy: str | VariablePolicy | None = None, + experimental_custom_gradients: bool = True, + ): ... + +class LoadOptions: + def __init__( + self, + allow_partial_checkpoint: bool = False, + experimental_io_device: str | None = None, + experimental_skip_checkpoint: bool = False, + ) -> None: ... + +class _LoadedModel(AutoTrackable, Generic[_P, _R]): + signatures: Mapping[str, ConcreteFunction[_P, _R]] + +def load( + export_dir: str, tags: str | Sequence[str] | None = None, options: LoadOptions | None = None +) -> _LoadedModel[..., Any]: ... + +_TF_Function = ConcreteFunction[..., object] | GenericFunction[..., object] + +def save( + obj: tf.Module, + export_dir: str, + signatures: _TF_Function | Mapping[str, _TF_Function] | None = None, + options: SaveOptions | None = None, +) -> None: ... + +SERVING = "serve" +TRAINING = "train" +EVAL = "eval" +GPU = "gpu" +TPU = "tpu" diff --git a/tensorflow/saved_model/experimental.pyi b/tensorflow/saved_model/experimental.pyi new file mode 100644 index 00000000..c2e9be31 --- /dev/null +++ b/tensorflow/saved_model/experimental.pyi @@ -0,0 +1,3 @@ +from enum import Enum + +class VariablePolicy(Enum): ... diff --git a/tensorflow/sparse.pyi b/tensorflow/sparse.pyi new file mode 100644 index 00000000..fd7890bb --- /dev/null +++ b/tensorflow/sparse.pyi @@ -0,0 +1,31 @@ +from tensorflow import SparseTensorCompatible, Tensor, TensorCompatible, TensorShape +from tensorflow.dtypes import DType + +class SparseTensor: + indices: Tensor + values: Tensor + dense_shape: Tensor + shape: TensorShape + dtype: DType + name: str + def __init__(self, indices: TensorCompatible, values: TensorCompatible, dense_shape: TensorCompatible) -> None: ... 
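A sketch of the tf.saved_model save/load pair typed above, assuming the real runtime (the export path is arbitrary):

    import tensorflow as tf

    module = tf.Module()
    module.v = tf.Variable(3.0)
    tf.saved_model.save(module, "/tmp/exported_module")
    loaded = tf.saved_model.load("/tmp/exported_module")  # a _LoadedModel[..., Any] above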
+ def get_shape(self) -> TensorShape: ... + # Many arithmetic operations are not directly supported. Some have alternatives like tf.sparse.add instead of +. + def __div__(self, y: SparseTensorCompatible) -> SparseTensor: ... + def __truediv__(self, y: SparseTensorCompatible) -> SparseTensor: ... + def __mul__(self, y: SparseTensorCompatible) -> SparseTensor: ... + def __rmul__(self, y: SparseTensorCompatible) -> SparseTensor: ... + +def to_dense( + sp_input: SparseTensor, + default_value: SparseTensorCompatible | None = None, + validate_indices: bool = True, + name: str | None = None, +) -> Tensor: ... +def from_dense(tensor: Tensor, name: str | None = None) -> SparseTensor: ... +def slice( + sp_input: SparseTensor, + start: list[int] | tuple[int, ...] | Tensor, + size: TensorCompatible, + name: str | None = None, +) -> SparseTensor: ... diff --git a/tensorflow/strings.pyi b/tensorflow/strings.pyi new file mode 100644 index 00000000..9faae137 --- /dev/null +++ b/tensorflow/strings.pyi @@ -0,0 +1,49 @@ +from typing import Any, Literal, Sequence, overload + +from tensorflow import RaggedTensor, Tensor, TensorCompatible + +def join(inputs: Sequence[TensorCompatible], separator: str = "", name: str | None = None) -> Tensor: ... + +# None corresponds to "" for split. +def split( + input: TensorCompatible, sep: str | None = None, maxsplit: int = -1, name: str | None = None +) -> RaggedTensor: ... +@overload +def as_string( + input: TensorCompatible, + precision: int = -1, + scientific: bool = False, + shortest: bool = False, + width: int = -1, + fill: str = "", + name: str | None = None, +) -> Tensor: ... +@overload +def as_string( + input: RaggedTensor, + precision: int = -1, + scientific: bool = False, + shortest: bool = False, + width: int = -1, + fill: str = "", + name: str | None = None, +) -> RaggedTensor: ... +@overload +def unicode_decode( + input: TensorCompatible, + input_encoding: str, + errors: Literal["replace", "strict", "ignore"] = "replace", + replacement_char: int = 65533, + replace_control_characters: bool = False, + name: str | None = None, +) -> Tensor | RaggedTensor: ... +@overload +def unicode_decode( + input: RaggedTensor, + input_encoding: str, + errors: Literal["replace", "strict", "ignore"] = "replace", + replacement_char: int = 65533, + replace_control_characters: bool = False, + name: str | None = None, +) -> RaggedTensor: ... +def __getattr__(name: str) -> Any: ... diff --git a/tensorflow/summary.pyi b/tensorflow/summary.pyi new file mode 100644 index 00000000..c83cce8e --- /dev/null +++ b/tensorflow/summary.pyi @@ -0,0 +1,9 @@ +from typing import Callable, Iterator + +from contextlib import contextmanager + +import tensorflow as tf + +def scalar(name: str, data: float | tf.Tensor, step: int | None = None, description: str | None = None) -> bool: ... +@contextmanager +def record_if(condition: bool | tf.Tensor | Callable[[], bool]) -> Iterator[None]: ... diff --git a/tensorflow/test.pyi b/tensorflow/test.pyi new file mode 100644 index 00000000..5afc8187 --- /dev/null +++ b/tensorflow/test.pyi @@ -0,0 +1,21 @@ +from typing import Any, Mapping, Sequence, overload + +import numpy as np + +from tensorflow import ContainerArrays, ContainerTensorsLike, Operation, Tensor + +class TestCase: + def setUp(self) -> None: ... + def tearDown(self) -> None: ... + @overload + def evaluate(self, tensors: Tensor) -> np.ndarray[Any, Any]: ... + @overload + def evaluate(self, tensors: Operation) -> None: ... 
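Following the comment above about unsupported operators, a sketch assuming the real runtime (tf.sparse.add exists in TensorFlow but is not stubbed in this patch):

    import tensorflow as tf

    a = tf.sparse.from_dense(tf.constant([[1.0, 0.0]]))
    b = tf.sparse.from_dense(tf.constant([[0.0, 2.0]]))
    c = tf.sparse.add(a, b)        # use tf.sparse.add; `a + b` is not supported
    dense = tf.sparse.to_dense(c)  # [[1.0, 2.0]]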
+ @overload + def evaluate(self, tensors: Mapping[str, Tensor]) -> Mapping[str, np.ndarray[Any, Any]]: ... + @overload + def evaluate(self, tensors: Sequence[Tensor]) -> Sequence[np.ndarray[Any, Any]]: ... + @overload + def evaluate(self, tensors: ContainerTensorsLike) -> ContainerArrays: ... + def assertEqual(self, first: object, second: object, msg: str | None = None) -> None: ... + def get_temp_dir(self) -> str: ... diff --git a/tensorflow/types/__init__.pyi b/tensorflow/types/__init__.pyi new file mode 100644 index 00000000..0f708d7f --- /dev/null +++ b/tensorflow/types/__init__.pyi @@ -0,0 +1 @@ +from tensorflow.types import experimental as experimental diff --git a/tensorflow/types/experimental.pyi b/tensorflow/types/experimental.pyi new file mode 100644 index 00000000..1c3fa4de --- /dev/null +++ b/tensorflow/types/experimental.pyi @@ -0,0 +1,33 @@ +from typing import Generic, TypeVar +from typing_extensions import ParamSpec + +import tensorflow as tf + +P = ParamSpec("P") +R = TypeVar("R", covariant=True) + +class Callable(Generic[P, R]): + def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R: ... + +# This is a real tensorflow class, but it's undocumented. ConcreteFunction +# heavily wraps this class. +class _FuncGraph(tf.Graph): + name: str + inputs: list[tf.Tensor] + outputs: list[tf.Tensor] + control_outputs: list[tf.Operation] + structured_input_signature: tuple[tuple[tf.TensorSpec, ...], dict[str, tf.TensorSpec]] + seed: int | None + +class GenericFunction(Callable[P, R]): + def get_concrete_function(self, *args: P.args, **kwargs: P.kwargs) -> ConcreteFunction[P, R]: ... + +class ConcreteFunction(Callable[P, R]): + @property + def structured_input_signature(self) -> tuple[tuple[tf.TensorSpec, ...], dict[str, tf.TensorSpec]]: ... + @property + def graph(self) -> _FuncGraph: ... + @property + def inputs(self) -> list[tf.Tensor]: ... + @property + def outputs(self) -> list[tf.Tensor]: ...
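A sketch of how the GenericFunction/ConcreteFunction split above surfaces in practice, assuming the real runtime: tf.function produces a GenericFunction, and get_concrete_function pins an input signature.

    import tensorflow as tf

    @tf.function
    def double(x: tf.Tensor) -> tf.Tensor:
        return x * 2.0

    concrete = double.get_concrete_function(tf.TensorSpec([None], tf.float32))
    print(concrete.structured_input_signature)  # ((TensorSpec(shape=(None,), ...),), {})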