Source code for k1lib._monkey

# AUTOGENERATED FILE! PLEASE DON'T EDIT
import torch, k1lib, numpy as np
from torch import nn; from k1lib import cli
from typing import List, Tuple, ContextManager
from contextlib import contextmanager
@k1lib.patch(nn.Module)
def importParams(self:nn.Module, params:List[nn.Parameter]):
    """Given a list of :class:`torch.nn.parameter.Parameter`/:class:`torch.Tensor`,
update the current :class:`torch.nn.Module`'s parameters with it"""
    for oldParam, newParam in zip(self.parameters(), params):
        oldParam.data = newParam.data.clone()
@k1lib.patch(nn.Module)
def exportParams(self:nn.Module) -> List[torch.Tensor]:
    """Gets the list of :class:`torch.Tensor` data"""
    return [param.data.clone() for param in self.parameters()]
class ParamsContext:
    def __init__(self, m:nn.Module): self.m = m
    def __enter__(self): self.params = self.m.exportParams(); return self.params
    def __exit__(self, *ignored): self.m.importParams(self.params)
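# Usage sketch (illustrative only, kept as comments so the module is unchanged):
# round-tripping weights between two modules with identical architectures; the
# names `src` and `dst` are hypothetical, used only for demonstration.
#
#     src, dst = nn.Linear(2, 3), nn.Linear(2, 3)
#     saved = src.exportParams()     # list of cloned torch.Tensor
#     dst.importParams(saved)        # dst now carries src's weight and bias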
@k1lib.patch(nn.Module)
@contextmanager
def paramsContext(self:nn.Module):
    """A nice context manager for :meth:`importParams` and :meth:`exportParams`.
Returns the old parameters on enter context. Example::

    m = nn.Linear(2, 3)
    with m.paramsContext() as oldParam:
        pass # go wild, train, mutate `m` however much you like
    # m automatically snaps back to the old param

Small reminder that this is not foolproof, as some :class:`~torch.nn.Module` subclasses
store extra state that is not part of their parameters, like
:class:`~torch.nn.BatchNorm2d` with its running statistics."""
    params = self.exportParams()
    try: yield
    finally: self.importParams(params)
@k1lib.patch(nn.Module)
def getParamsVector(model:nn.Module) -> List[torch.Tensor]:
    """For each parameter, returns a normal distributed random tensor
with the same standard deviation as the original parameter"""
    answer = []
    for param in model.parameters():
        a = torch.randn(param.shape).to(param.device)
        b = param.std() if param.numel() > 1 else 1
        answer.append(a * b)
    return answer
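# Illustrative sketch (comments only): getParamsVector() yields one random tensor per
# parameter, matching each parameter's shape, device and rough standard deviation;
# `m` below is a hypothetical module.
#
#     m = nn.Linear(2, 3)
#     noise = m.getParamsVector()
#     [t.shape for t in noise]       # [torch.Size([3, 2]), torch.Size([3])]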
class _NnModuleDeviceContext:
    def __init__(self, nnModule):
        self.nnModule = nnModule
    def __enter__(self):
        self.devices = [p.device for p in self.nnModule.parameters()]
    def __exit__(self, *ignored):
        for p, device in zip(self.nnModule.parameters(), self.devices):
            p.data = p.to(device=device)
@k1lib.patch(nn.Module)
def preserveDevice(self:nn.Module) -> ContextManager:
    """Preserves the device of whatever operation is inside this. Example::

    import torch.nn as nn
    m = nn.Linear(3, 4)
    with m.preserveDevice():
        m.cuda() # moves whole model to cuda
    # automatically moves model to cpu

This works even if the model's parameters live on many different devices."""
    return _NnModuleDeviceContext(self)
@k1lib.patch(nn.Module)
def __ror__(self, x):
    """Allows piping input to :class:`torch.nn.Module`, to match same style as
the module :mod:`k1lib.cli`. Example::

    # returns torch.Size([5, 3])
    torch.randn(5, 2) | nn.Linear(2, 3) | cli.shape()"""
    return self(x)
@k1lib.patch(nn.Module, name="nParams")
@property
def nParams(self):
    """Get the number of parameters of this module.
Example::

    # returns 9, because 6 (2*3) for weight, and 3 for bias
    nn.Linear(2, 3).nParams
"""
    return sum([p.numel() for p in self.parameters()])
@k1lib.patch(torch)
@k1lib.patch(torch.Tensor)
def crissCross(*others:Tuple[torch.Tensor]) -> torch.Tensor:
    """Concats multiple 1d tensors, sorts it, and get evenly-spaced values. Also
available as :meth:`torch.crissCross` and :meth:`~k1lib.cli.others.crissCross`.
Example::

    a = torch.tensor([2, 2, 3, 6])
    b = torch.tensor([4, 8, 10, 12, 18, 20, 30, 35])
    
    # returns tensor([2, 3, 6, 10, 18, 30])
    a.crissCross(b)
    
    # returns tensor([ 2,  4,  8, 10, 18, 20, 30, 35])
    a.crissCross(*([b]*10)) # 1 "a" and 10 "b"s
    
    # returns tensor([ 2,  2,  3,  6, 18])
    b.crissCross(*([a]*10)) # 1 "b" and 10 "a"s

Note how in the second case, the result's length is the same as tensor b's, and its
contents stay pretty close to b. The third case is the opposite: the length is almost
the same as tensor a's, and the contents stay pretty close to a."""
    return torch.cat([o.flatten() for o in others]).sort()[0][::len(others)]
@k1lib.patch(torch.Tensor)
def histBounds(self:torch.Tensor, bins=100) -> torch.Tensor:
    r"""Flattens and sorts the tensor, then get value of tensor at regular
linspace intervals. Does not guarantee bounds' uniqueness. Example::

    # Tensor with lots of 2s and 5s
    a = torch.Tensor([2]*5 + [3]*3 + [4] + [5]*4)
    # returns torch.tensor([2., 3., 5.])
    a.histBounds(3).unique()

The example result essentially shows 3 bins: :math:`[2, 3)`, :math:`[3, 5)` and
:math:`[5, \infty)`. This might be useful in scaling pixels so that networks handle
it nicely. Rough idea taken from fastai.medical.imaging."""
    sortedTensor = self.flatten().sort()[0]
    ls = torch.linspace(0, 1, bins); ls[-1] = 1-1e-6
    bigLs = (ls * len(sortedTensor)).long()
    return sortedTensor[bigLs]
@k1lib.patch(torch.Tensor)
def histScaled(self:torch.Tensor, bins=100, bounds=None) -> torch.Tensor:
    """Scale tensor's values so that the values are roughly spreaded out in range
:math:`[0, 1]` to ease neural networks' pain. Rough idea taken from
fastai.medical.imaging. Example::

    # normal-distributed values
    a = torch.randn(1000)
    # plot #1 shows a normal distribution
    plt.hist(a.numpy(), bins=30); plt.show()
    # plot #2 shows almost-uniform distribution
    plt.hist(a.histScaled().numpy()); plt.show()

Plot #1:

.. image:: images/histScaledNormal.png

Plot #2:

.. image:: images/histScaledUniform.png

:param bins: if ``bounds`` not specified, then will scale according to a hist
    with this many bins
:param bounds: if specified, then ``bins`` is ignored and scaling follows these
    bounds, which are expected to be a sorted tensor going from ``min(self)`` to
    ``max(self)``."""
    if bounds is None: bounds = self.histBounds(bins).unique()
    else: bounds = bounds.unique()
    out = np.interp(self.numpy().flatten(), bounds, np.linspace(0, 1, len(bounds)))
    return torch.tensor(out).reshape(self.shape)
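# Illustrative sketch (comments only): the `bounds` parameter lets several tensors be
# scaled consistently against one reference tensor; the names here are hypothetical.
#
#     ref = torch.randn(1000)
#     bounds = ref.histBounds(50)
#     scaled = torch.randn(200).histScaled(bounds=bounds)   # values land in [0, 1]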
@k1lib.patch(torch.Tensor)
def clearNan(self, value:float=0.0) -> torch.Tensor:
    """Sets all nan values to a specified value.
Example::

    a = torch.randn(3, 3) * float("nan")
    a.clearNan() # now full of zeros"""
    self[self != self] = value
    return self
@k1lib.patch(torch.Tensor)
def hasNan(self) -> bool:
    """Returns whether this Tensor has any nan values at all."""
    return (self != self).sum() > 0
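# Illustrative sketch (comments only) combining hasNan() and clearNan():
#
#     a = torch.tensor([1.0, float("nan"), 3.0])
#     a.hasNan()        # tensor(True)
#     a.clearNan()      # in-place; a is now tensor([1., 0., 3.])
#     a.hasNan()        # tensor(False)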
try:
    import graphviz
    @k1lib.patch(graphviz.Digraph, "__call__")
    @k1lib.patch(graphviz.Graph, "__call__")
    def _call(self, _from, *tos, **kwargs):
        """Convenience method to quickly construct graphs.
Example::

    g = k1lib.graph()
    g("a", "b", "c")
    g # displays arrows from "a" to "b" and "a" to "c"
"""
        for to in tos: self.edge(_from, to, **kwargs)
    def digraph():
        """Convenience method for creating a new graphviz Digraph. Example::

    g = k1lib.digraph()
    g("a", "b", "c")
    g # displays arrows from "a" to "b" and "a" to "c"
"""
        return graphviz.Digraph(graph_attr={"rankdir":"TB"})
    def graph():
        """Convenience method for creating a new graphviz Graph.
See also: :meth:`digraph`"""
        return graphviz.Graph(graph_attr={"rankdir":"TB"})
except ImportError: digraph = graph = lambda: print("Module `graphviz` not found! Please install it first, something like `pip install graphviz`")