### minitorch/operators.py
"""
Collection of the core mathematical operators used throughout the code base.
"""
import math
from typing import Callable, Iterable

def mul(x: float, y: float) -> float:
    """$f(x, y) = x * y$"""
    return x * y

def id(x: float) -> float:
    """$f(x) = x$"""
    return x

def add(x: float, y: float) -> float:
    """$f(x, y) = x + y$"""
    return x + y

def neg(x: float) -> float:
    """$f(x) = -x$"""
    return -x

def lt(x: float, y: float) -> float:
    """$f(x) =$ 1.0 if x is less than y else 0.0"""
    pass

def eq(x: float, y: float) -> float:
    """$f(x) =$ 1.0 if x is equal to y else 0.0"""
    pass

def max(x: float, y: float) -> float:
    """$f(x) =$ x if x is greater than y else y"""
    pass

def is_close(x: float, y: float) -> float:
    """$f(x) = |x - y| < 1e-2$"""
    pass

def sigmoid(x: float) -> float:
    """
    $f(x) =  \\frac{1.0}{(1.0 + e^{-x})}$

    (See https://en.wikipedia.org/wiki/Sigmoid_function )

    Calculate as

    $f(x) =  \\frac{1.0}{(1.0 + e^{-x})}$ if x >=0 else $\\frac{e^x}{(1.0 + e^{x})}$

    for stability.
    """
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    return math.exp(x) / (1.0 + math.exp(x))

def relu(x: float) -> float:
    """
    $f(x) =$ x if x is greater than 0, else 0

    (See https://en.wikipedia.org/wiki/Rectifier_(neural_networks) .)
    """
    return x if x > 0 else 0.0

EPS = 1e-06

def log(x: float) -> float:
    """$f(x) = log(x)$"""
    # EPS guards against log(0) on boundary inputs.
    return math.log(x + EPS)

def exp(x: float) -> float:
    """$f(x) = e^{x}$"""
    return math.exp(x)

def log_back(x: float, d: float) -> float:
    """If $f = log$ as above, compute $d \\times f'(x)$"""
    # d/dx log(x) = 1/x (EPS keeps the division safe, as in `log`).
    return d / (x + EPS)

def inv(x: float) -> float:
    """$f(x) = 1/x$"""
    return 1.0 / x

def inv_back(x: float, d: float) -> float:
    """If $f(x) = 1/x$ compute $d \\times f'(x)$"""
    # d/dx (1/x) = -1/x^2
    return -(1.0 / x ** 2) * d

def relu_back(x: float, d: float) -> float:
    """If $f = relu$ compute $d \\times f'(x)$"""
    # relu'(x) = 1 for x > 0, else 0
    return d if x > 0 else 0.0

def map(fn: Callable[[float], float]) -> Callable[[Iterable[float]], Iterable[float]]:
    """
    Higher-order map.

    See https://en.wikipedia.org/wiki/Map_(higher-order_function)

    Args:
        fn: Function from one value to one value.

    Returns:
         A function that takes a list, applies `fn` to each element, and returns a
         new list
    """
    def _map(ls: Iterable[float]) -> Iterable[float]:
        return [fn(x) for x in ls]

    return _map

def negList(ls: Iterable[float]) -> Iterable[float]:
    """Use `map` and `neg` to negate each element in `ls`"""
    return map(neg)(ls)

def zipWith(fn: Callable[[float, float], float]) -> Callable[[Iterable[float], Iterable[float]], Iterable[float]]:
    """
    Higher-order zipwith (or map2).

    See https://en.wikipedia.org/wiki/Map_(higher-order_function)

    Args:
        fn: combine two values

    Returns:
         Function that takes two equally sized lists `ls1` and `ls2` and produces
         a new list by applying fn(x, y) on each pair of elements.

    """
    def _zipWith(ls1: Iterable[float], ls2: Iterable[float]) -> Iterable[float]:
        return [fn(x, y) for x, y in zip(ls1, ls2)]

    return _zipWith

def addLists(ls1: Iterable[float], ls2: Iterable[float]) -> Iterable[float]:
    """Add the elements of `ls1` and `ls2` using `zipWith` and `add`"""
    return zipWith(add)(ls1, ls2)

def reduce(fn: Callable[[float, float], float], start: float) -> Callable[[Iterable[float]], float]:
    """
    Higher-order reduce.

    Args:
        fn: combine two values
        start: start value $x_0$

    Returns:
         Function that takes a list `ls` of elements
         $x_1 \ldots x_n$ and computes the reduction $fn(x_n, \ldots, fn(x_2,
         fn(x_1, x_0)))$
    """
    def _reduce(ls: Iterable[float]) -> float:
        val = start
        for x in ls:
            val = fn(x, val)
        return val

    return _reduce

def sum(ls: Iterable[float]) -> float:
    """Sum up a list using `reduce` and `add`."""
    return reduce(add, 0.0)(ls)

def prod(ls: Iterable[float]) -> float:
    """Product of a list using `reduce` and `mul`."""
    return reduce(mul, 1.0)(ls)

### minitorch/autodiff.py
from dataclasses import dataclass
from typing import Any, Iterable, List, Tuple
from typing_extensions import Protocol

def central_difference(f: Any, *vals: Any, arg: int=0, epsilon: float=1e-06) -> Any:
    """
    Computes an approximation to the derivative of `f` with respect to one arg.

    See :doc:`derivative` or https://en.wikipedia.org/wiki/Finite_difference for more details.

    Args:
        f : arbitrary function from n-scalar args to one value
        *vals : n-float values $x_0 \\ldots x_{n-1}$
        arg : the index $i$ of the arg to compute the derivative with respect to
        epsilon : a small constant

    Returns:
        An approximation of $f'_i(x_0, \\ldots, x_{n-1})$
    """
    vals1 = [v for v in vals]
    vals2 = [v for v in vals]
    vals1[arg] = vals1[arg] + epsilon
    vals2[arg] = vals2[arg] - epsilon
    return (f(*vals1) - f(*vals2)) / (2.0 * epsilon)

variable_count = 1

class Variable(Protocol):
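    # The members below are a sketch of the interface that `topological_sort`
    # and `backpropagate` rely on; they mirror how `Scalar` (scalar.py) is
    # used elsewhere in the project.
    def accumulate_derivative(self, x: Any) -> None:
        ...

    @property
    def unique_id(self) -> int:
        ...

    def is_leaf(self) -> bool:
        ...

    def is_constant(self) -> bool:
        ...

    @property
    def parents(self) -> Iterable["Variable"]:
        ...

    def chain_rule(self, d_output: Any) -> Iterable[Tuple["Variable", Any]]:
        ...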

def topological_sort(variable: Variable) -> Iterable[Variable]:
    """
    Computes the topological order of the computation graph.

    Args:
        variable: The right-most variable

    Returns:
        Non-constant Variables in topological order starting from the right.
    """
    # A depth-first-search sketch; it assumes the `Variable` interface above.
    order: List[Variable] = []
    seen = set()

    def visit(var: Variable) -> None:
        if var.unique_id in seen or var.is_constant():
            return
        if not var.is_leaf():
            for parent in var.parents:
                visit(parent)
        seen.add(var.unique_id)
        order.insert(0, var)

    visit(variable)
    return order

def backpropagate(variable: Variable, deriv: Any) -> None:
    """
    Runs backpropagation on the computation graph in order to
    compute derivatives for the leaf nodes.

    Args:
        variable: The right-most variable
        deriv  : Its derivative that we want to propagate backward to the leaves.

    No return. Should write its results to the derivative values of each leaf through `accumulate_derivative`.
    """
    # A sketch using the `Variable` interface above: walk the graph in
    # topological order, pushing derivatives back with the chain rule.
    derivatives = {variable.unique_id: deriv}
    for var in topological_sort(variable):
        d_output = derivatives[var.unique_id]
        if var.is_leaf():
            var.accumulate_derivative(d_output)
        else:
            for parent, d in var.chain_rule(d_output):
                if parent.is_constant():
                    continue
                derivatives.setdefault(parent.unique_id, 0.0)
                derivatives[parent.unique_id] += d

@dataclass
class Context:
    """
    Context class is used by `Function` to store information during the forward pass.
    """
    no_grad: bool = False
    saved_values: Tuple[Any, ...] = ()

    def save_for_backward(self, *values: Any) -> None:
        """Store the given `values` if they need to be used during backpropagation."""
        if self.no_grad:
            return
        self.saved_values = values

### minitorch/scalar_functions.py
from __future__ import annotations
from typing import TYPE_CHECKING
import minitorch
from . import operators
from .autodiff import Context
if TYPE_CHECKING:
    from typing import Tuple
    from .scalar import Scalar, ScalarLike

def wrap_tuple(x):
    """Turn a possible value into a tuple"""
    if isinstance(x, tuple):
        return x
    return (x,)

def unwrap_tuple(x):
    """Turn a singleton tuple into a value"""
    if len(x) == 1:
        return x[0]
    return x

class ScalarFunction:
    """
    A wrapper for a mathematical function that processes and produces
    Scalar variables.

    This is a static class and is never instantiated. We use `class`
    here to group together the `forward` and `backward` code.
    """

class Add(ScalarFunction):
    """Addition function $f(x, y) = x + y$"""

class Log(ScalarFunction):
    """Log function $f(x) = log(x)$"""

class Mul(ScalarFunction):
    """Multiplication function"""

class Inv(ScalarFunction):
    """Inverse function"""

class Neg(ScalarFunction):
    """Negation function"""

class Sigmoid(ScalarFunction):
    """Sigmoid function"""

class ReLU(ScalarFunction):
    """ReLU function"""

class Exp(ScalarFunction):
    """Exp function"""

class LT(ScalarFunction):
    """Less-than function $f(x) =$ 1.0 if x is less than y else 0.0"""

class EQ(ScalarFunction):
    """Equal function $f(x) =$ 1.0 if x is equal to y else 0.0"""### minitorch/scalar.py
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Iterable, Optional, Sequence, Tuple, Type, Union
import numpy as np
from .autodiff import Context, Variable, backpropagate, central_difference
from .scalar_functions import EQ, LT, Add, Exp, Inv, Log, Mul, Neg, ReLU, ScalarFunction, Sigmoid
ScalarLike = Union[float, int, 'Scalar']

@dataclass
class ScalarHistory:
    """
    `ScalarHistory` stores the history of `Function` operations that was
    used to construct the current Variable.

    Attributes:
        last_fn : The last Function that was called.
        ctx : The context for that Function.
        inputs : The inputs that were given when `last_fn.forward` was called.

    """
    last_fn: Optional[Type[ScalarFunction]] = None
    ctx: Optional[Context] = None
    inputs: Sequence[Scalar] = ()
_var_count = 0

class Scalar:
    """
    A reimplementation of scalar values for autodifferentiation
    tracking. Scalar Variables behave as close as possible to standard
    Python numbers while also tracking the operations that led to the
    number's creation. They can only be manipulated by
    `ScalarFunction`.
    """
    history: Optional[ScalarHistory]
    derivative: Optional[float]
    data: float
    unique_id: int
    name: str

    def __init__(self, v: float, back: ScalarHistory=ScalarHistory(), name: Optional[str]=None):
        global _var_count
        _var_count += 1
        self.unique_id = _var_count
        self.data = float(v)
        self.history = back
        self.derivative = None
        if name is not None:
            self.name = name
        else:
            self.name = str(self.unique_id)

    def __repr__(self) -> str:
        return 'Scalar(%f)' % self.data

    def __mul__(self, b: ScalarLike) -> Scalar:
        return Mul.apply(self, b)

    def __truediv__(self, b: ScalarLike) -> Scalar:
        return Mul.apply(self, Inv.apply(b))

    def __rtruediv__(self, b: ScalarLike) -> Scalar:
        return Mul.apply(b, Inv.apply(self))

    def __add__(self, b: ScalarLike) -> Scalar:
        return Add.apply(self, b)

    def __bool__(self) -> bool:
        return bool(self.data)

    def __lt__(self, b: ScalarLike) -> Scalar:
        return LT.apply(self, b)

    def __gt__(self, b: ScalarLike) -> Scalar:
        return LT.apply(b, self)

    def __eq__(self, b: ScalarLike) -> Scalar:
        return EQ.apply(self, b)

    def __sub__(self, b: ScalarLike) -> Scalar:
        return Add.apply(self, Neg.apply(b))

    def __neg__(self) -> Scalar:
        return Neg.apply(self)

    def __radd__(self, b: ScalarLike) -> Scalar:
        return self + b

    def __rmul__(self, b: ScalarLike) -> Scalar:
        return self * b

    def accumulate_derivative(self, x: Any) -> None:
        """
        Add `x` to the derivative accumulated on this variable.
        Should only be called during autodifferentiation on leaf variables.

        Args:
            x: value to be accumulated
        """
        assert self.is_leaf(), "Only leaf variables can have derivatives."
        if self.derivative is None:
            self.derivative = 0.0
        self.derivative += x

    def is_leaf(self) -> bool:
        """True if this variable created by the user (no `last_fn`)"""
        pass

    def backward(self, d_output: Optional[float]=None) -> None:
        """
        Calls autodiff to fill in the derivatives for the history of this object.

        Args:
            d_output (number, opt): starting derivative to backpropagate through the model
                                   (typically left out, and assumed to be 1.0).
        """
        if d_output is None:
            d_output = 1.0
        backpropagate(self, d_output)

def derivative_check(f: Any, *scalars: Scalar) -> None:
    """
    Checks that autodiff agrees with the central-difference approximation
    on a Python function. Raises an assertion error if a derivative is incorrect.

    Parameters:
        f : function from n-scalars to 1-scalar.
        *scalars  : n input scalar values.
    """
    out = f(*scalars)
    out.backward()
    for i, x in enumerate(scalars):
        check = central_difference(f, *scalars, arg=i)
        assert x.derivative is not None
        np.testing.assert_allclose(x.derivative, check.data, 1e-2, 1e-2)

### minitorch/module.py
from __future__ import annotations
from typing import Any, Dict, Optional, Sequence, Tuple

class Module:
    """
    Modules form a tree that store parameters and other
    submodules. They make up the basis of neural network stacks.

    Attributes:
        _modules : Storage of the child modules
        _parameters : Storage of the module's parameters
        training : Whether the module is in training mode or evaluation mode

    """
    _modules: Dict[str, Module]
    _parameters: Dict[str, Parameter]
    training: bool

    def __init__(self) -> None:
        self._modules = {}
        self._parameters = {}
        self.training = True

    def modules(self) -> Sequence[Module]:
        """Return the direct child modules of this module."""
        return list(self.__dict__['_modules'].values())

    def train(self) -> None:
        """Set the mode of this module and all descendent modules to `train`."""
        pass

    def eval(self) -> None:
        """Set the mode of this module and all descendent modules to `eval`."""
        pass

    def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
        """
        Collect all the parameters of this module and its descendants.


        Returns:
            The name and `Parameter` of each descendant parameter.
        """
        params = []
        for k, v in self._parameters.items():
            params.append((k, v))
        for name, module in self._modules.items():
            for k, v in module.named_parameters():
                params.append((name + '.' + k, v))
        return params

    def parameters(self) -> Sequence[Parameter]:
        """Enumerate over all the parameters of this module and its descendents."""
        pass

    def add_parameter(self, k: str, v: Any) -> Parameter:
        """
        Manually add a parameter. Useful helper for scalar parameters.

        Args:
            k: Local name of the parameter.
            v: Value for the parameter.

        Returns:
            Newly created parameter.
        """
        val = Parameter(v, k)
        self.__dict__['_parameters'][k] = val
        return val

    def __setattr__(self, key: str, val: Parameter) -> None:
        if isinstance(val, Parameter):
            self.__dict__['_parameters'][key] = val
        elif isinstance(val, Module):
            self.__dict__['_modules'][key] = val
        else:
            super().__setattr__(key, val)

    def __getattr__(self, key: str) -> Any:
        if key in self.__dict__['_parameters']:
            return self.__dict__['_parameters'][key]
        if key in self.__dict__['_modules']:
            return self.__dict__['_modules'][key]
        return None

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        return self.forward(*args, **kwargs)

    def __repr__(self) -> str:

        def _addindent(s_: str, numSpaces: int) -> str:
            s2 = s_.split('\n')
            if len(s2) == 1:
                return s_
            first = s2.pop(0)
            s2 = [numSpaces * ' ' + line for line in s2]
            s = '\n'.join(s2)
            s = first + '\n' + s
            return s
        child_lines = []
        for key, module in self._modules.items():
            mod_str = repr(module)
            mod_str = _addindent(mod_str, 2)
            child_lines.append('(' + key + '): ' + mod_str)
        lines = child_lines
        main_str = self.__class__.__name__ + '('
        if lines:
            main_str += '\n  ' + '\n  '.join(lines) + '\n'
        main_str += ')'
        return main_str

class Parameter:
    """
    A Parameter is a special container stored in a `Module`.

    It is designed to hold a `Variable`, but we allow it to hold
    any value for testing.
    """

    def __init__(self, x: Any, name: Optional[str]=None) -> None:
        self.value = x
        self.name = name
        if hasattr(x, 'requires_grad_'):
            self.value.requires_grad_(True)
            if self.name:
                self.value.name = self.name

    def update(self, x: Any) -> None:
        """Update the parameter value."""
        # Mirrors the wrapping logic in `__init__` above.
        self.value = x
        if hasattr(x, 'requires_grad_'):
            self.value.requires_grad_(True)
            if self.name:
                self.value.name = self.name

    def __repr__(self) -> str:
        return repr(self.value)

    def __str__(self) -> str:
        return str(self.value)

### minitorch/tensor_data.py
from __future__ import annotations
import random
from typing import Iterable, Optional, Sequence, Tuple, Union
import numba
import numpy as np
import numpy.typing as npt
from numpy import array, float64
from typing_extensions import TypeAlias
from .operators import prod
MAX_DIMS = 32

class IndexingError(RuntimeError):
    """Exception raised for indexing errors."""

# [The remainder of tensor_data.py was clipped in this dump.]

### minitorch/tensor.py

# [The beginning of tensor.py, including the `Tensor` class header, was
# clipped in this dump; the surviving `Tensor` methods follow.]

    def mean(self, dim: Optional[int]=None) -> Tensor:
        """Compute the mean over dimension `dim`"""
        # A sketch; it relies on `sum` and `size` from the clipped part of
        # this class.
        if dim is not None:
            return self.sum(dim) / self.shape[dim]
        return self.sum() / self.size

    def permute(self, *order: int) -> Tensor:
        """Permute tensor dimensions to *order"""
        pass

    def view(self, *shape: int) -> Tensor:
        """Change the shape of the tensor to a new shape with the same size"""
        pass

    def contiguous(self) -> Tensor:
        """Return a contiguous tensor with the same data"""
        pass

    def __repr__(self) -> str:
        return self._tensor.to_string()

    def __getitem__(self, key: Union[int, UserIndex]) -> float:
        key2 = (key,) if isinstance(key, int) else key
        return self._tensor.get(key2)

    def __setitem__(self, key: Union[int, UserIndex], val: float) -> None:
        key2 = (key,) if isinstance(key, int) else key
        self._tensor.set(key2, val)

    @staticmethod
    def make(storage: Union[Storage, List[float]], shape: UserShape, strides: Optional[UserStrides]=None, backend: Optional[TensorBackend]=None) -> Tensor:
        """Create a new tensor from data"""
        pass

    def expand(self, other: Tensor) -> Tensor:
        """
        Method used to allow for backprop over broadcasting.
        This method is called when the output of `backward`
        is a different size than the input of `forward`.


        Parameters:
            other : backward tensor (must broadcast with self)

        Returns:
            Expanded version of `other` with the right derivatives

        """
        pass

    def accumulate_derivative(self, x: Any) -> None:
        """
        Add `x` to the derivative accumulated on this variable.
        Should only be called during autodifferentiation on leaf variables.

        Args:
            x : value to be accumulated
        """
        pass

    def is_leaf(self) -> bool:
        """True if this variable created by the user (no `last_fn`)"""
        pass

    def zero_grad_(self) -> None:
        """
        Reset the derivative on this variable.
        """
        # A sketch; assumes the clipped `grad` field that stores derivatives.
        self.grad = None

### minitorch/nn.py
from typing import Tuple
from . import operators
from .autodiff import Context
from .fast_ops import FastOps
from .tensor import Tensor
from .tensor_functions import Function, rand, tensor

def tile(input: Tensor, kernel: Tuple[int, int]) -> Tuple[Tensor, int, int]:
    """
    Reshape an image tensor for 2D pooling

    Args:
        input: batch x channel x height x width
        kernel: height x width of pooling

    Returns:
        Tensor of size batch x channel x new_height x new_width x (kernel_height * kernel_width) as well as the new_height and new_width value.
    """
    batch, channel, height, width = input.shape
    kh, kw = kernel
    assert height % kh == 0
    assert width % kw == 0
    new_height = height // kh
    new_width = width // kw
    # Split each spatial dim into (blocks, block size), then move the two
    # kernel dims to the end and flatten them together.
    t = input.contiguous().view(batch, channel, new_height, kh, new_width, kw)
    t = t.permute(0, 1, 2, 4, 3, 5).contiguous()
    return t.view(batch, channel, new_height, new_width, kh * kw), new_height, new_width

def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
    """
    Tiled average pooling 2D

    Args:
        input : batch x channel x height x width
        kernel : height x width of pooling

    Returns:
        Pooled tensor
    """
    batch, channel, _, _ = input.shape
    tiled, new_height, new_width = tile(input, kernel)
    return tiled.mean(4).view(batch, channel, new_height, new_width)

max_reduce = FastOps.reduce(operators.max, -1000000000.0)

def argmax(input: Tensor, dim: int) -> Tensor:
    """
    Compute the argmax as a 1-hot tensor.

    Args:
        input : input tensor
        dim : dimension to apply argmax


    Returns:
        :class:`Tensor` : tensor with 1 on highest cell in dim, 0 otherwise

    """
    out = max_reduce(input, dim)
    # Broadcasting `==` marks every cell equal to the per-dim maximum.
    return out == input

class Max(Function):

    @staticmethod
    def forward(ctx: Context, input: Tensor, dim: Tensor) -> Tensor:
        """Forward of max should be max reduction"""
        # `dim` arrives as a tensor; unwrap it for the raw reduction.
        d = int(dim.item())
        ctx.save_for_backward(input, d)
        return max_reduce(input, d)

    @staticmethod
    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, float]:
        """Backward of max should be argmax (see above)"""
        input, d = ctx.saved_values
        return grad_output * argmax(input, d), 0.0

def softmax(input: Tensor, dim: int) -> Tensor:
    """
    Compute the softmax as a tensor.



    $z_i = \\frac{e^{x_i}}{\\sum_j e^{x_j}}$

    Args:
        input : input tensor
        dim : dimension to apply softmax

    Returns:
        softmax tensor
    """
    # A sketch; uses the Tensor ops (`exp`, `sum`) defined in tensor.py.
    e = input.exp()
    return e / e.sum(dim)

def logsoftmax(input: Tensor, dim: int) -> Tensor:
    """
    Compute the log of the softmax as a tensor.

    $z_i = x_i - \\log \\sum_j e^{x_j}$

    See https://en.wikipedia.org/wiki/LogSumExp#log-sum-exp_trick_for_log-domain_calculations

    Args:
        input : input tensor
        dim : dimension to apply log-softmax

    Returns:
         log of softmax tensor
    """
    # A sketch of the log-sum-exp trick: subtract the max (held constant)
    # before exponentiating, for numerical stability.
    m = max_reduce(input, dim)
    return input - (input - m).exp().sum(dim).log() - m

def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
    """
    Tiled max pooling 2D

    Args:
        input: batch x channel x height x width
        kernel: height x width of pooling

    Returns:
        Tensor : pooled tensor
    """
    batch, channel, _, _ = input.shape
    tiled, new_height, new_width = tile(input, kernel)
    # Reduce over the flattened kernel dimension via the differentiable Max.
    return Max.apply(tiled, tensor([4])).view(batch, channel, new_height, new_width)

def dropout(input: Tensor, rate: float, ignore: bool=False) -> Tensor:
    """
    Dropout positions based on random noise.

    Args:
        input : input tensor
        rate : probability [0, 1) of dropping out each position
        ignore : skip dropout, i.e. do nothing at all

    Returns:
        tensor with random positions dropped out
    """
    if ignore:
        return input
    # Keep each position when a uniform draw exceeds the drop rate.
    return input * (rand(input.shape) > rate)

### minitorch/fast_ops.py
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
from numba import njit, prange
from .tensor_data import MAX_DIMS, broadcast_index, index_to_position, shape_broadcast, to_index
from .tensor_ops import MapProto, TensorOps
if TYPE_CHECKING:
    from typing import Callable, Optional
    from .tensor import Tensor
    from .tensor_data import Index, Shape, Storage, Strides
to_index = njit(inline='always')(to_index)
index_to_position = njit(inline='always')(index_to_position)
broadcast_index = njit(inline='always')(broadcast_index)

class FastOps(TensorOps):

    @staticmethod
    def map(fn: Callable[[float], float]) -> MapProto:
        """See `tensor_ops.py`"""
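        # A driver sketch in the style of the rest of the project: JIT the
        # scalar `fn`, then run the low-level `tensor_map` into an output
        # tensor (allocated via the Tensor helpers from tensor.py).
        f = tensor_map(njit()(fn))

        def ret(a: Tensor, out: Optional[Tensor] = None) -> Tensor:
            if out is None:
                out = a.zeros(a.shape)
            f(*out.tuple(), *a.tuple())
            return out

        return ret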

    @staticmethod
    def zip(fn: Callable[[float, float], float]) -> Callable[[Tensor, Tensor], Tensor]:
        """See `tensor_ops.py`"""
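        # Driver sketch: broadcast the two shapes, then run the low-level zip.
        f = tensor_zip(njit()(fn))

        def ret(a: Tensor, b: Tensor) -> Tensor:
            c_shape = shape_broadcast(a.shape, b.shape)
            out = a.zeros(c_shape)
            f(*out.tuple(), *a.tuple(), *b.tuple())
            return out

        return ret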

    @staticmethod
    def reduce(fn: Callable[[float, float], float], start: float=0.0) -> Callable[[Tensor, int], Tensor]:
        """See `tensor_ops.py`"""
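        # Driver sketch: shrink `dim` to size 1 and seed the output with `start`.
        f = tensor_reduce(njit()(fn))

        def ret(a: Tensor, dim: int) -> Tensor:
            out_shape = list(a.shape)
            out_shape[dim] = 1
            out = a.zeros(tuple(out_shape))
            out._tensor._storage[:] = start
            f(*out.tuple(), *a.tuple(), dim)
            return out

        return ret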

    @staticmethod
    def matrix_multiply(a: Tensor, b: Tensor) -> Tensor:
        """
        Batched tensor matrix multiply ::

            for n:
              for i:
                for j:
                  for k:
                    out[n, i, j] += a[n, i, k] * b[n, k, j]

        Where n indicates an optional broadcasted batched dimension.

        Should work for tensor shapes of 3 dims ::

            assert a.shape[-1] == b.shape[-2]

        Args:
            a : tensor data a
            b : tensor data b

        Returns:
            New tensor data
        """
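        # A sketch: promote 2-D inputs to batched 3-D, broadcast the batch
        # dims, run the low-level kernel, then undo the promotion.
        both_2d = 0
        if len(a.shape) == 2:
            a = a.contiguous().view(1, a.shape[0], a.shape[1])
            both_2d += 1
        if len(b.shape) == 2:
            b = b.contiguous().view(1, b.shape[0], b.shape[1])
            both_2d += 1

        ls = list(shape_broadcast(a.shape[:-2], b.shape[:-2]))
        ls.append(a.shape[-2])
        ls.append(b.shape[-1])
        assert a.shape[-1] == b.shape[-2]
        out = a.zeros(tuple(ls))

        tensor_matrix_multiply(*out.tuple(), out.size, *a.tuple(), *b.tuple())

        if both_2d == 2:
            out = out.view(out.shape[1], out.shape[2])
        return out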

def tensor_map(fn: Callable[[float], float]) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides], None]:
    """
    NUMBA low-level tensor_map function. See `tensor_ops.py` for description.

    Optimizations:

    * Main loop in parallel
    * All indices use numpy buffers
    * When `out` and `in` are stride-aligned, avoid indexing

    Args:
        fn: function mappings floats-to-floats to apply.

    Returns:
        Tensor map function.
    """
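    # A sketch meeting the constraints above.
    def _map(out: Storage, out_shape: Shape, out_strides: Strides, in_storage: Storage, in_shape: Shape, in_strides: Strides) -> None:
        # Stride-aligned fast path: positions correspond one-to-one.
        if np.array_equal(out_strides, in_strides) and np.array_equal(out_shape, in_shape):
            for i in prange(len(out)):
                out[i] = fn(in_storage[i])
        else:
            for i in prange(len(out)):
                out_index = np.zeros(MAX_DIMS, np.int32)
                in_index = np.zeros(MAX_DIMS, np.int32)
                to_index(i, out_shape, out_index)
                broadcast_index(out_index, out_shape, in_shape, in_index)
                out[index_to_position(out_index, out_strides)] = fn(in_storage[index_to_position(in_index, in_strides)])

    return njit(parallel=True)(_map)  # type: ignore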

def tensor_zip(fn: Callable[[float, float], float]) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides, Storage, Shape, Strides], None]:
    """
    NUMBA higher-order tensor zip function. See `tensor_ops.py` for description.


    Optimizations:

    * Main loop in parallel
    * All indices use numpy buffers
    * When `out`, `a`, `b` are stride-aligned, avoid indexing

    Args:
        fn: function maps two floats to float to apply.

    Returns:
        Tensor zip function.
    """
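    # A sketch meeting the constraints above.
    def _zip(out: Storage, out_shape: Shape, out_strides: Strides, a_storage: Storage, a_shape: Shape, a_strides: Strides, b_storage: Storage, b_shape: Shape, b_strides: Strides) -> None:
        if (np.array_equal(out_strides, a_strides) and np.array_equal(out_strides, b_strides)
                and np.array_equal(out_shape, a_shape) and np.array_equal(out_shape, b_shape)):
            # Stride-aligned fast path: no index math needed.
            for i in prange(len(out)):
                out[i] = fn(a_storage[i], b_storage[i])
        else:
            for i in prange(len(out)):
                out_index = np.zeros(MAX_DIMS, np.int32)
                a_index = np.zeros(MAX_DIMS, np.int32)
                b_index = np.zeros(MAX_DIMS, np.int32)
                to_index(i, out_shape, out_index)
                broadcast_index(out_index, out_shape, a_shape, a_index)
                broadcast_index(out_index, out_shape, b_shape, b_index)
                out[index_to_position(out_index, out_strides)] = fn(
                    a_storage[index_to_position(a_index, a_strides)],
                    b_storage[index_to_position(b_index, b_strides)],
                )

    return njit(parallel=True)(_zip)  # type: ignore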

def tensor_reduce(fn: Callable[[float, float], float]) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides, int], None]:
    """
    NUMBA higher-order tensor reduce function. See `tensor_ops.py` for description.

    Optimizations:

    * Main loop in parallel
    * All indices use numpy buffers
    * Inner-loop should not call any functions or write non-local variables

    Args:
        fn: reduction function mapping two floats to float.

    Returns:
        Tensor reduce function
    """
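    # A sketch: one output position per parallel iteration; the inner loop
    # only calls `fn` and accumulates into a local variable.
    def _reduce(out: Storage, out_shape: Shape, out_strides: Strides, a_storage: Storage, a_shape: Shape, a_strides: Strides, reduce_dim: int) -> None:
        for i in prange(len(out)):
            out_index = np.zeros(MAX_DIMS, np.int32)
            to_index(i, out_shape, out_index)
            o = index_to_position(out_index, out_strides)
            # `out_index` has index 0 along `reduce_dim`, so this is the base.
            pos = index_to_position(out_index, a_strides)
            step = a_strides[reduce_dim]
            acc = out[o]
            for s in range(a_shape[reduce_dim]):
                acc = fn(acc, a_storage[pos + s * step])
            out[o] = acc

    return njit(parallel=True)(_reduce)  # type: ignore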

def _tensor_matrix_multiply(out: Storage, out_shape: Shape, out_strides: Strides, a_storage: Storage, a_shape: Shape, a_strides: Strides, b_storage: Storage, b_shape: Shape, b_strides: Strides) -> None:
    """
    NUMBA tensor matrix multiply function.

    Should work for any tensor shapes that broadcast as long as

    ```
    assert a_shape[-1] == b_shape[-2]
    ```

    Optimizations:

    * Outer loop in parallel
    * No index buffers or function calls
    * Inner loop should have no global writes, 1 multiply.


    Args:
        out (Storage): storage for `out` tensor
        out_shape (Shape): shape for `out` tensor
        out_strides (Strides): strides for `out` tensor
        a_storage (Storage): storage for `a` tensor
        a_shape (Shape): shape for `a` tensor
        a_strides (Strides): strides for `a` tensor
        b_storage (Storage): storage for `b` tensor
        b_shape (Shape): shape for `b` tensor
        b_strides (Strides): strides for `b` tensor

    Returns:
        None : Fills in `out`
    """
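    # A sketch meeting the constraints above. A batch stride of 0 implements
    # broadcasting of a size-1 batch dimension.
    a_batch_stride = a_strides[0] if a_shape[0] > 1 else 0
    b_batch_stride = b_strides[0] if b_shape[0] > 1 else 0

    for n in prange(out_shape[0]):
        for i in range(out_shape[1]):
            for j in range(out_shape[2]):
                a_pos = n * a_batch_stride + i * a_strides[1]
                b_pos = n * b_batch_stride + j * b_strides[2]
                acc = 0.0
                for k in range(a_shape[2]):
                    # One multiply per inner iteration; accumulate locally.
                    acc += a_storage[a_pos + k * a_strides[2]] * b_storage[b_pos + k * b_strides[1]]
                out[n * out_strides[0] + i * out_strides[1] + j * out_strides[2]] = acc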
tensor_matrix_multiply = njit(parallel=True, fastmath=True)(_tensor_matrix_multiply)

### minitorch/fast_conv.py
from typing import Tuple
import numpy as np
from numba import njit, prange
from .autodiff import Context
from .tensor import Tensor
from .tensor_data import MAX_DIMS, Index, Shape, Strides, broadcast_index, index_to_position, to_index
from .tensor_functions import Function
to_index = njit(inline='always')(to_index)
index_to_position = njit(inline='always')(index_to_position)
broadcast_index = njit(inline='always')(broadcast_index)

def _tensor_conv1d(out: Tensor, out_shape: Shape, out_strides: Strides, out_size: int, input: Tensor, input_shape: Shape, input_strides: Strides, weight: Tensor, weight_shape: Shape, weight_strides: Strides, reverse: bool) -> None:
    """
    1D Convolution implementation.

    Given input tensor of

       `batch, in_channels, width`

    and weight tensor

       `out_channels, in_channels, k_width`

    Computes padded output of

       `batch, out_channels, width`

    `reverse` decides if weight is anchored left (False) or right.
    (See diagrams)

    Args:
        out (Storage): storage for `out` tensor.
        out_shape (Shape): shape for `out` tensor.
        out_strides (Strides): strides for `out` tensor.
        out_size (int): size of the `out` tensor.
        input (Storage): storage for `input` tensor.
        input_shape (Shape): shape for `input` tensor.
        input_strides (Strides): strides for `input` tensor.
        weight (Storage): storage for `weight` tensor.
        weight_shape (Shape): shape for `weight` tensor.
        weight_strides (Strides): strides for `weight` tensor.
        reverse (bool): anchor weight at left or right
    """
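    # A sketch: for each output position, slide the kernel over the input,
    # skipping taps that fall outside the (implicitly zero-padded) input.
    batch_, out_channels, out_width = out_shape
    batch, in_channels, width = input_shape
    out_channels_, in_channels_, kw = weight_shape
    assert batch == batch_ and in_channels == in_channels_ and out_channels == out_channels_

    s1 = input_strides
    s2 = weight_strides

    for i in prange(out_size):
        out_index = np.zeros(MAX_DIMS, np.int32)
        to_index(i, out_shape, out_index)
        b, oc, ow = out_index[0], out_index[1], out_index[2]
        acc = 0.0
        for ic in range(in_channels):
            for k in range(kw):
                iw = ow - k if reverse else ow + k
                if iw >= 0 and iw < width:
                    acc += (input[b * s1[0] + ic * s1[1] + iw * s1[2]]
                            * weight[oc * s2[0] + ic * s2[1] + k * s2[2]])
        out[index_to_position(out_index, out_strides)] = acc
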
tensor_conv1d = njit(parallel=True)(_tensor_conv1d)

class Conv1dFun(Function):

    @staticmethod
    def forward(ctx: Context, input: Tensor, weight: Tensor) -> Tensor:
        """
        Compute a 1D Convolution

        Args:
            ctx : Context
            input : batch x in_channel x w
            weight : out_channel x in_channel x kw

        Returns:
            batch x out_channel x w
        """
        ctx.save_for_backward(input, weight)
        batch, in_channels, w = input.shape
        out_channels, in_channels2, kw = weight.shape
        assert in_channels == in_channels2
        # Run the low-level kernel into a zeroed output (a sketch; uses the
        # Tensor helpers `zeros`, `tuple`, and `size` from tensor.py).
        output = input.zeros((batch, out_channels, w))
        tensor_conv1d(*output.tuple(), output.size, *input.tuple(), *weight.tuple(), False)
        return output

conv1d = Conv1dFun.apply

def _tensor_conv2d(out: Tensor, out_shape: Shape, out_strides: Strides, out_size: int, input: Tensor, input_shape: Shape, input_strides: Strides, weight: Tensor, weight_shape: Shape, weight_strides: Strides, reverse: bool) -> None:
    """
    2D Convolution implementation.

    Given input tensor of

       `batch, in_channels, height, width`

    and weight tensor

       `out_channels, in_channels, k_height, k_width`

    Computes padded output of

       `batch, out_channels, height, width`

    `reverse` decides if weight is anchored top-left (False) or bottom-right.
    (See diagrams)


    Args:
        out (Storage): storage for `out` tensor.
        out_shape (Shape): shape for `out` tensor.
        out_strides (Strides): strides for `out` tensor.
        out_size (int): size of the `out` tensor.
        input (Storage): storage for `input` tensor.
        input_shape (Shape): shape for `input` tensor.
        input_strides (Strides): strides for `input` tensor.
        weight (Storage): storage for `weight` tensor.
        weight_shape (Shape): shape for `weight` tensor.
        weight_strides (Strides): strides for `weight` tensor.
        reverse (bool): anchor weight at top-left or bottom-right
    """
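    # A sketch mirroring the 1-D version, over both spatial dimensions.
    batch_, out_channels, out_height, out_width = out_shape
    batch, in_channels, height, width = input_shape
    out_channels_, in_channels_, kh, kw = weight_shape
    assert batch == batch_ and in_channels == in_channels_ and out_channels == out_channels_

    s1 = input_strides
    s2 = weight_strides

    for i in prange(out_size):
        out_index = np.zeros(MAX_DIMS, np.int32)
        to_index(i, out_shape, out_index)
        b, oc = out_index[0], out_index[1]
        oh, ow = out_index[2], out_index[3]
        acc = 0.0
        for ic in range(in_channels):
            for dh in range(kh):
                for dw in range(kw):
                    ih = oh - dh if reverse else oh + dh
                    iw = ow - dw if reverse else ow + dw
                    if ih >= 0 and ih < height and iw >= 0 and iw < width:
                        acc += (input[b * s1[0] + ic * s1[1] + ih * s1[2] + iw * s1[3]]
                                * weight[oc * s2[0] + ic * s2[1] + dh * s2[2] + dw * s2[3]])
        out[index_to_position(out_index, out_strides)] = acc
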
tensor_conv2d = njit(parallel=True, fastmath=True)(_tensor_conv2d)

class Conv2dFun(Function):

    @staticmethod
    def forward(ctx: Context, input: Tensor, weight: Tensor) -> Tensor:
        """
        Compute a 2D Convolution

        Args:
            ctx : Context
            input : batch x in_channel x h x w
            weight  : out_channel x in_channel x kh x kw

        Returns:
            (:class:`Tensor`) : batch x out_channel x h x w
        """
        ctx.save_for_backward(input, weight)
        batch, in_channels, h, w = input.shape
        out_channels, in_channels2, kh, kw = weight.shape
        assert in_channels == in_channels2
        # A sketch; uses the Tensor helpers `zeros`, `tuple`, and `size`.
        output = input.zeros((batch, out_channels, h, w))
        tensor_conv2d(*output.tuple(), output.size, *input.tuple(), *weight.tuple(), False)
        return output

conv2d = Conv2dFun.apply

### minitorch/testing.py
from typing import Callable, Generic, Iterable, Tuple, TypeVar
import minitorch.operators as operators
A = TypeVar('A')

class MathTest(Generic[A]):

    @staticmethod
    def neg(a: A) -> A:
        """Negate the argument"""
        return -a

    @staticmethod
    def addConstant(a: A) -> A:
        """Add contant to the argument"""
        pass

    @staticmethod
    def square(a: A) -> A:
        """Manual square"""
        return a * a

    @staticmethod
    def cube(a: A) -> A:
        """Manual cube"""
        return a * a * a

    @staticmethod
    def subConstant(a: A) -> A:
        """Subtract a constant from the argument"""
        return a - 5  # the constant is arbitrary

    @staticmethod
    def multConstant(a: A) -> A:
        """Multiply a constant to the argument"""
        pass

    @staticmethod
    def div(a: A) -> A:
        """Divide by a constant"""
        return a / 5  # the constant is arbitrary

    @staticmethod
    def inv(a: A) -> A:
        """Invert after adding"""
        return operators.inv(a + 3.5)  # the shift keeps the argument away from zero

    @staticmethod
    def sig(a: A) -> A:
        """Apply sigmoid"""
        return operators.sigmoid(a)

    @staticmethod
    def log(a: A) -> A:
        """Apply log to a large value"""
        return operators.log(a + 100000)  # the shift keeps the argument large and positive

    @staticmethod
    def relu(a: A) -> A:
        """Apply relu"""
        return operators.relu(a + 5.5)  # the shift is arbitrary

    @staticmethod
    def exp(a: A) -> A:
        """Apply exp to a smaller value"""
        return operators.exp(a - 200)  # the shift avoids overflow

    @staticmethod
    def add2(a: A, b: A) -> A:
        """Add two arguments"""
        return a + b

    @staticmethod
    def mul2(a: A, b: A) -> A:
        """Mul two arguments"""
        pass

    @staticmethod
    def div2(a: A, b: A) -> A:
        """Divide two arguments"""
        return a / (b + 5.5)  # the shift keeps the divisor away from zero

    @classmethod
    def _tests(cls) -> Tuple[Tuple[str, Callable[[A], A]], Tuple[str, Callable[[A, A], A]], Tuple[str, Callable[[Iterable[A]], A]]]:
        """
        Returns a list of all the math tests.
        """
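        # A sketch: bucket the public callables by naming convention
        # (`*2` = two-argument, `red*` = reduction, otherwise one-argument).
        one_arg = []
        two_arg = []
        red_arg = []
        for k in dir(cls):
            if k.startswith('_') or not callable(getattr(cls, k)):
                continue
            tup = (k, getattr(cls, k))
            if k.endswith('2'):
                two_arg.append(tup)
            elif k.startswith('red'):
                red_arg.append(tup)
            else:
                one_arg.append(tup)
        return one_arg, two_arg, red_arg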

class MathTestVariable(MathTest):
    pass