
tensorbuilder.patches.layers_patch module

import tensorflow as tf
import inspect
import functools
from tensorflow.contrib import layers
from tensorflow.contrib.layers import fully_connected, convolution2d
from tensorbuilder import TensorBuilder
from phi import utils, P, patch
from phi.builder import Builder


class LayerBuilder(Builder):
    """docstring for LayerBuilder."""

    @property
    def TensorBuilder(self):
        return TensorBuilder()._unit(self._f, self._refs)

#Add property to TensorBuilder
TensorBuilder.layers = property(lambda self: LayerBuilder()._unit(self._f, self._refs))

# patch all layer functions
patch.builder_with_members_from_1(LayerBuilder, layers, module_alias="tf.contrib.layers") #, _return_type=TensorBuilder)
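# For orientation -- a hedged sketch of what the patch above enables (TF 0.x
# era with tf.contrib; `T`, the ready-made builder instance tensorbuilder
# exposes, is assumed here):
#
#     import tensorflow as tf
#     from tensorbuilder import T
#
#     x = tf.placeholder(tf.float32, shape=[None, 4])
#
#     # tf.contrib.layers functions become methods on the .layers property;
#     # the call returns a partial that expects the input tensor as the
#     # omitted 1st argument.
#     h = T.layers.fully_connected(8)(x)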

# fully connected layers
blacklist = (
    ["relu_layer"] +
    TensorBuilder.__core__
)

funs = ( (name, f) for (name, f) in inspect.getmembers(tf.nn, inspect.isfunction) if name not in blacklist )

def register_layer_functions(name, f):
    explanation = """and the keyword argument `activation_fn` is set to `tf.nn.{0}`.""".format(name)

    @TensorBuilder.Register1("tf.contrib.layers", name + "_layer", wrapped=fully_connected, explanation=explanation) #, _return_type=TensorBuilder)
    def layer_function(*args, **kwargs):
        kwargs['activation_fn'] = f
        return fully_connected(*args, **kwargs)

def register_conv_layer_functions(name, f):
    explanation = """and the keyword argument `activation_fn` is set to `tf.nn.{0}`.""".format(name)

    @TensorBuilder.Register1("tf.contrib.layers", name + "_conv2d_layer", wrapped=convolution2d, explanation=explanation) #, _return_type=TensorBuilder)
    def layer_function(*args, **kwargs):
        kwargs['activation_fn'] = f
        return convolution2d(*args, **kwargs)

for name, f in funs:
    register_layer_functions(name, f)
    register_conv_layer_functions(name, f)
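
# Sketch of what the loop above generates (assuming `T` and `x` as in the
# earlier sketch): each tf.nn activation `name` yields a `name_layer` method
# that forwards to fully_connected with activation_fn pre-set, so
#
#     h1 = T.relu_layer(10)(x)
#
# should match
#
#     h2 = tf.contrib.layers.fully_connected(x, 10, activation_fn=tf.nn.relu)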



#linear_layer
explanation = """and the keyword argument `activation_fn` is set to `None`."""

@TensorBuilder.Register1("tf.contrib.layers", alias="linear_layer", wrapped=fully_connected, explanation=explanation) #, _return_type=TensorBuilder)
def linear_layer(*args, **kwargs):
    kwargs['activation_fn'] = None
    return tf.contrib.layers.fully_connected(*args, **kwargs)

@TensorBuilder.Register1("tf.contrib.layers", alias="linear_conv2d_layer", wrapped=convolution2d, explanation=explanation) #, _return_type=TensorBuilder)
def linear_conv2d_layer(*args, **kwargs):
    kwargs['activation_fn'] = None
    return tf.contrib.layers.convolution2d(*args, **kwargs)
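
# Sketch: linear_layer is fully_connected with activation_fn=None, i.e. a
# plain affine map y = x*W + b (assuming `T` and `x` as above):
#
#     y1 = T.linear_layer(3)(x)
#     y2 = tf.contrib.layers.fully_connected(x, 3, activation_fn=None)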

def _polynomial(tensor):
    # Raise each feature column to a growing power: y[:, n] = x[:, n] ** (n + 1)
    size = int(tensor.get_shape()[1])
    pows = [ tf.pow(tensor[:, n], n + 1) for n in range(size) ]
    return tf.transpose(tf.pack(pows))

explanation = """
However, it uses an activation function of the form
```
y(i) = z(i)^(i+1)
```
where `z = w*x + b`
"""

@TensorBuilder.Register1("tb", alias="polynomial_layer", wrapped=fully_connected, explanation=explanation) #, _return_type=TensorBuilder)
def polynomial_layer(*args, **kwargs):
    kwargs['activation_fn'] = _polynomial
    return layers.fully_connected(*args, **kwargs)
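
# Sketch: with n = 3 units, polynomial_layer computes z = w*x + b with no
# activation and raises each column to a growing power, giving columns
# z[:, 0]**1, z[:, 1]**2, z[:, 2]**3 (assuming `T` and `x` as above):
#
#     p = T.polynomial_layer(3)(x)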


whitelist = ["convolution2d", "max_pool2d", "avg_pool2d", "flatten"]
patch.builder_with_members_from_1(TensorBuilder, layers, module_alias="tf.contrib.layers", whitelist=lambda x: x in whitelist) #, _return_type=TensorBuilder)
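
# Sketch: the whitelisted ops land directly on TensorBuilder, so a small
# conv/pool head can be piped without going through .layers (assuming `T`
# as above; shapes are illustrative):
#
#     img = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
#     feats = T.Pipe(
#         img,
#         T.convolution2d(16, [3, 3]),
#         T.max_pool2d([2, 2]),
#         T.flatten()
#     )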

Module variables

var blacklist

var explanation

var funs

var name

var whitelist

Functions

def linear_conv2d_layer(

*args, **kwargs)

@TensorBuilder.Register1("tf.contrib.layers", alias="linear_conv2d_layer", wrapped=convolution2d, explanation=explanation) #, _return_type=TensorBuilder)
def linear_conv2d_layer(*args, **kwargs):
    kwargs['activation_fn'] = None
    return tf.contrib.layers.convolution2d(*args, **kwargs)

def linear_layer(

*args, **kwargs)

@TensorBuilder.Register1("tf.contrib.layers", alias="linear_layer", wrapped=fully_connected, explanation=explanation) #, _return_type=TensorBuilder)
def linear_layer(*args, **kwargs):
    kwargs['activation_fn'] = None
    return tf.contrib.layers.fully_connected(*args, **kwargs)

def polynomial_layer(

*args, **kwargs)

@TensorBuilder.Register1("tb", alias="polynomial_layer", wrapped=fully_connected, explanation=explanation) #, _return_type=TensorBuilder)
def polynomial_layer(*args, **kwargs):
    kwargs['activation_fn'] = _polynomial
    return layers.fully_connected(*args, **kwargs)

def register_conv_layer_functions(

name, f)

def register_conv_layer_functions(name, f):
    explanation = """and the keyword argument `activation_fn` is set to `tf.nn.{0}`.""".format(name)

    @TensorBuilder.Register1("tf.contrib.layers", name + "_conv2d_layer", wrapped=convolution2d, explanation=explanation) #, _return_type=TensorBuilder)
    def layer_function(*args, **kwargs):
        kwargs['activation_fn'] = f
        return convolution2d(*args, **kwargs)

def register_layer_functions(

name, f)

def register_layer_functions(name, f):
    explanation = """and the keyword argument `activation_fn` is set to `tf.nn.{0}`.""".format(name)

    @TensorBuilder.Register1("tf.contrib.layers", name + "_layer", wrapped=fully_connected, explanation=explanation) #, _return_type=TensorBuilder)
    def layer_function(*args, **kwargs):
        kwargs['activation_fn'] = f
        return fully_connected(*args, **kwargs)

Classes

class LayerBuilder

Builder that exposes tf.contrib.layers functions as methods.

class LayerBuilder(Builder):
    """docstring for LayerBuilder."""

    @property
    def TensorBuilder(self):
        return TensorBuilder()._unit(self._f, self._refs)

Ancestors (in MRO)

  • LayerBuilder
  • phi.builder.Builder
  • phi.lambdas.Lambda
  • phi.dsl.Function
  • phi.dsl.Node
  • __builtin__.object

Class variables

var If

var Ref

Instance variables

var Obj

var Read

var Rec

var TensorBuilder

var Write

Methods

def __init__(

self, f)

def __init__(self, f):
    super(Lambda, self).__init__(f)
    self._f = f

def Context(

cls, *args)

Builder Core. Also available as a global function as phi.Context.

Returns the context object of the current dsl.With statement.

Arguments

  • *args: By design Context accepts any number of arguments and completely ignores them.

This is a classmethod and it doesn't return a Builder/Lambda by design, so it can be called directly:

from phi import P, Context, Obj

def read_file(z):
    f = Context()
    return f.read()

lines = P.Pipe(
    "text.txt",
    P.With( open,
        read_file,
        Obj.split("\n")
    )
)

Here we called Context with no arguments to get the context back; however, since you can also give this function an argument (which it will ignore), it can be passed to the DSL, so we can rewrite the previous as:

from phi import P, Context, Obj

lines = P.Pipe(
    "text.txt",
    P.With( open,
        Context, # f
        Obj.read(),
        Obj.split("\n")
    )
)

Context raises an exception when used outside of a With block.

Also see

  • phi.builder.Builder.Obj
  • dsl
@classmethod
def Context(cls, *args):
    """
    **Builder Core**. Also available as a global function as `phi.Context`.

    Returns the context object of the current `dsl.With` statement.

    **Arguments**

    * ***args**: By design `Context` accepts any number of arguments and completely ignores them.

    This is a classmethod and it doesn't return a `Builder`/`Lambda` by design, so it can be called directly:

        from phi import P, Context, Obj

        def read_file(z):
            f = Context()
            return f.read()

        lines = P.Pipe(
            "text.txt",
            P.With( open,
                read_file,
                Obj.split("\\n")
            )
        )

    Here we called `Context` with no arguments to get the context back; however, since you can also give this function an argument (which it will ignore), it can be passed to the DSL, so we can rewrite the previous as:

        from phi import P, Context, Obj

        lines = P.Pipe(
            "text.txt",
            P.With( open,
                Context, # f
                Obj.read(),
                Obj.split("\\n")
            )
        )

    `Context` raises an exception when used outside of a `With` block.

    **Also see**

    * `phi.builder.Builder.Obj`
    * [dsl](https://cgarciae.github.io/phi/dsl.m.html)
    """
    if dsl.With.GLOBAL_CONTEXT is dsl._NO_VALUE:
        raise Exception("Cannot use 'Context' outside of a 'With' block")
    return dsl.With.GLOBAL_CONTEXT

def DoRegisterMethod(

cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True)

This method enables you to register any function fn that takes an Applicative as its first argument as a method of the Builder class.

Arguments

  • fn: a function that at least takes an Applicative as its first argument.
  • library_path: the route of the library from which this function was taken, used for documentation purposes.
  • alias: allows you to specify the name of the method; it will take the name of the function if it's None.
  • doc: the documentation for the method; if None a predefined documentation will be generated based on the documentation of fn.

Return

None

Examples

@classmethod
def DoRegisterMethod(cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True):
    """
    This method enables you to register any function `fn` that takes an Applicative as its first argument as a method of the Builder class.
    **Arguments**
    * `fn`: a function that atleast takes an Applicative as its first argument.
    * `library_path`: the route of the librar from which this function was taken, used for documentation purposes.
    * `alias`: allows you to specify the name of the method, it will take the name of the function if its `None`.
    * `doc`: the documentation for the method, if `None` a predefied documentation will be generated based on the documentation of `fn`.
    **Return**
    `None`
    **Examples**
    """
    if wrapped:
        fn = functools.wraps(wrapped)(fn)
    fn_signature = utils.get_method_sig(fn)
 	fn_docs = inspect.getdoc(fn)
    name = alias if alias else fn.__name__
    original_name = fn.__name__ if wrapped else original_name if original_name else name
    fn.__name__ = name
    fn.__doc__ = doc if doc else ("""
 METHOD IS AUTOMATICALLY GENERATED
builder.{1}(*args, **kwargs)
ccepts the same arguments as `{3}.{0}`. """ + explanation + """
}.{0}**
{2}
    """).format(original_name, name, fn_docs, library_path) if explain else fn_docs
    if name in cls.__core__:
        raise Exception("Can't add method '{0}' because its on __core__".format(name))
    fn = method_type(fn)
    setattr(cls, name, fn)
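
The **Examples** section above is empty in the source. A minimal hedged sketch of direct registration (the function double, the library path "my.lib", and the assert are made up for illustration):

from phi import P
from phi.builder import Builder

# Hypothetical: `double` takes the builder (self) as its first argument,
# so after registration it is callable as a method on any Builder.
def double(builder):
    return builder.Then(lambda x: x * 2)

Builder.DoRegisterMethod(double, "my.lib", alias="double")

assert P.double()(21) == 42   # identity composed with (* 2), applied to 21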

def Make(

self, *code, **kwargs)

The Make method takes an expression from the DSL and compiles it to a function.

Arguments

  • *code: any expression from the DSL. code is implicitly a tuple since that is what Python gives you when you declare a Variadic Function, therefore, according to the rules of the DSL, all expressions inside of code will be composed together. See Composition.
  • flatten = False: if flatten is True and the argument being returned by the compiled function is a list it will instead return a flattened list.
  • _return_type = None: By default Make returns an object of the same class e.g. Builder, however you can pass in a custom class that inherits from Builder as the returned container. This is useful if the custom builder has specialized methods.
  • create_ref_context = True: determines if a reference manager should be created on compilation. See Compile.
  • refs = True: external/default values for references passed during compilation. See Compile.

Examples

from phi import P

def add1(x): return x + 1
def mul3(x): return x * 3

f = P.Make(
    add1,
    mul3
)

assert f(1) == 6

Here f is equivalent to

def f(x):
    x = add1(x)
    x = mul3(x)
    return x

The previous example using lambdas to create the functions

from phi import P

f = P.Make(
    P + 1,
    P * 3
)

assert f(1) == 6

Also see

def Make(self, *code, **kwargs):
    """
    The `Make` method takes an expression from the DSL and compiles it to a function.

    **Arguments**

    * ***code**: any expression from the DSL. `code` is implicitly a `tuple` since that is what Python gives you when you declare a [Variadic Function](https://docs.python.org/3/tutorial/controlflow.html#arbitrary-argument-lists), therefore, according to the rules of the DSL, all expressions inside of `code` will be composed together. See [Composition](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Composition).
    * *flatten = False*: if `flatten` is True and the argument being returned by the compiled function is a `list` it will instead return a flattened list.
    * *_return_type = None*: By default `Make` returns an object of the same class e.g. `Builder`, however you can pass in a custom class that inherits from `Builder` as the returned container. This is useful if the custom builder has specialized methods.
    * *create_ref_context = True*: determines if a reference manager should be created on compilation. See [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile).
    * *refs = True*: external/default values for references passed during compilation. See [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile).

    **Examples**

        from phi import P

        def add1(x): return x + 1
        def mul3(x): return x * 3

        f = P.Make(
            add1,
            mul3
        )

        assert f(1) == 6

    Here `f` is equivalent to

        def f(x):
            x = add1(x)
            x = mul3(x)
            return x

    The previous example using [lambdas](https://cgarciae.github.io/phi/lambdas.m.html) to create the functions

        from phi import P

        f = P.Make(
            P + 1,
            P * 3
        )

        assert f(1) == 6

    **Also see**

    * [dsl](https://cgarciae.github.io/phi/dsl.m.html)
    * [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile)
    * [lambdas](https://cgarciae.github.io/phi/lambdas.m.html)
    """
    _return_type = kwargs.get('_return_type', None)
    flatten = kwargs.get('flatten', False)
    refs = kwargs.get('refs', {})
    create_ref_context = kwargs.get('create_ref_context', True)
    # code = (self, code)
    if flatten:
        code = (code, lambda x: utils.flatten_list(x) if type(x) is list else x)
    f = dsl.Compile(code, refs, create_ref_context=create_ref_context)
    return self.__then__(f, _return_type=_return_type)

def NMake(

self, *args, **kwargs)

NMake is a shortcut for Make(..., create_ref_context=False); its full name would be NoCreateRefContextMake, but that is impractically long. Normally, methods that compile DSL expressions, like phi.builder.Builder.Make or phi.builder.Builder.Pipe, create a reference context unless specified otherwise; these contexts encapsulate references (see read or write) and prevent them from leaking, which is good. There are times, however, when you consciously want a sub-Make or sub-Pipe expression to read or write references from the main Make or Pipe expression; for this you need to set create_ref_context to False.

Arguments

  • Same arguments as phi.builder.Builder.Make but...
  • create_ref_context is hardcoded to False

Examples

If you compile a sub expression as a function for another expression e.g.

from phi import P

assert 1 == P.Pipe(
    1, {'s'}, # write s == 1, outer context
    P.Make(
        P + 1, {'s'} # write s == 2, inner context
    ),
    's'  # read s == 1, outer context
)

you find that references are not shared. However, if you avoid the creation of a new reference context via a keyword argument

from phi import P

assert 2 == P.Pipe(
    1, {'s'},   #write s == 1, same context
    P.Make(
        P + 1, {'s'},   #write s == 2, same context
        create_ref_context=False
    ),
    's'   # read s == 2, same context
)

you can achieve what you want. Yet writing create_ref_context=False is a little cumbersome, so to make things nicer we just use a shortcut: an N at the beginning of the NMake method name

from phi import P

assert 2 == P.Pipe(
    1, {'s'},   #write s == 1, same context
    P.NMake(
        P + 1, {'s'}   #write s == 2, same context
    ),
    's'   # read s == 2, same context
)

Also see

  • phi.builder.Builder.Make
  • phi.builder.Builder.NPipe
  • phi.builder.Builder.NRun
  • dsl
  • Compile
def NMake(self, *args, **kwargs):
    """
    `NMake` is a shortcut for `Make(..., create_ref_context=False)`; its full name would be *NoCreateRefContextMake*, but that is impractically long. Normally, methods that [compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile) DSL expressions, like `phi.builder.Builder.Make` or `phi.builder.Builder.Pipe`, create a reference context unless specified otherwise; these contexts encapsulate references (see [read](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Read) or [write](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Write)) and prevent them from leaking, which is good. There are times, however, when you consciously want a sub-Make or sub-Pipe expression to read or write references from the main Make or Pipe expression; for this you need to set `create_ref_context` to `False`.

    **Arguments**

    * Same arguments as `phi.builder.Builder.Make` but...
    * **create_ref_context** is hardcoded to `False`

    **Examples**

    If you compile a sub expression as a function for another expression e.g.

        from phi import P

        assert 1 == P.Pipe(
            1, {'s'}, # write s == 1, outer context
            P.Make(
                P + 1, {'s'} # write s == 2, inner context
            ),
            's'  # read s == 1, outer context
        )

    you find that references are not shared. However, if you avoid the creation of a new reference context via a keyword argument

        from phi import P

        assert 2 == P.Pipe(
            1, {'s'},   #write s == 1, same context
            P.Make(
                P + 1, {'s'},   #write s == 2, same context
                create_ref_context=False
            ),
            's'   # read s == 2, same context
        )

    you can achieve what you want. Yet writing `create_ref_context=False` is a little cumbersome, so to make things nicer we just use a shortcut: an `N` at the beginning of the `NMake` method name

        from phi import P

        assert 2 == P.Pipe(
            1, {'s'},   #write s == 1, same context
            P.NMake(
                P + 1, {'s'}   #write s == 2, same context
            ),
            's'   # read s == 2, same context
        )

    **Also see**

    * `phi.builder.Builder.Make`
    * `phi.builder.Builder.NPipe`
    * `phi.builder.Builder.NRun`
    * [dsl](https://cgarciae.github.io/phi/dsl.m.html)
    * [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile)
    """
    kwargs['create_ref_context'] = False
    return self.Make(*args, **kwargs)

def NPipe(

self, x, *code, **kwargs)

NPipe is a shortcut for Pipe(..., create_ref_context=False); its full name would be NoCreateRefContextPipe, but that is impractically long. Normally, methods that compile DSL expressions, like phi.builder.Builder.Make or phi.builder.Builder.Pipe, create a reference context unless specified otherwise; these contexts encapsulate references (see read or write) and prevent them from leaking, which is good. There are times, however, when you consciously want a sub-Make or sub-Pipe expression to read or write references from the main Make or Pipe expression; for this you need to set create_ref_context to False.

Arguments

  • Same arguments as phi.builder.Builder.Pipe but...
  • create_ref_context is hardcoded to False

Examples

If you compile a sub expression as a function for another expression e.g.

from phi import P

assert 1 == P.Pipe(
    1, {'s'}, # write s == 1, outer context
    lambda x: P.Pipe(
        x,
        P + 1, {'s'} # write s == 2, inner context
    ),
    's'  # read s == 1, outer context
)

you find that references are not shared. However, if you avoid the creation of a new reference context via a keyword argument

from phi import P

assert 2 == P.Pipe(
    1, {'s'},   #write s == 1, same context
    lambda x: P.Pipe(
        x,
        P + 1, {'s'},   #write s == 2, same context
        create_ref_context=False
    ),
    's'   # read s == 2, same context
)

you can achieve what you want. Yet writing create_ref_context=False is a little cumbersome, so to make things nicer we just use a shortcut: an N at the beginning of the NPipe method name

from phi import P

assert 2 == P.Pipe(
    1, {'s'},   #write s == 1, same context
    lambda x: P.NPipe(
        x,
        P + 1, {'s'}   #write s == 2, same context
    ),
    's'   # read s == 2, same context
)

Also see

  • phi.builder.Builder.Pipe
  • phi.builder.Builder.NMake
  • phi.builder.Builder.NRun
  • dsl
  • Compile
def NPipe(self, x, *code, **kwargs):
    """
    `NPipe` is a shortcut for `Pipe(..., create_ref_context=False)`; its full name would be *NoCreateRefContextPipe*, but that is impractically long. Normally, methods that [compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile) DSL expressions, like `phi.builder.Builder.Make` or `phi.builder.Builder.Pipe`, create a reference context unless specified otherwise; these contexts encapsulate references (see [read](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Read) or [write](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Write)) and prevent them from leaking, which is good. There are times, however, when you consciously want a sub-Make or sub-Pipe expression to read or write references from the main Make or Pipe expression; for this you need to set `create_ref_context` to `False`.

    **Arguments**

    * Same arguments as `phi.builder.Builder.Pipe` but...
    * **create_ref_context** is hardcoded to `False`

    **Examples**

    If you compile a sub expression as a function for another expression e.g.

        from phi import P

        assert 1 == P.Pipe(
            1, {'s'}, # write s == 1, outer context
            lambda x: P.Pipe(
                x,
                P + 1, {'s'} # write s == 2, inner context
            ),
            's'  # read s == 1, outer context
        )

    you find that references are not shared. However, if you avoid the creation of a new reference context via a keyword argument

        from phi import P

        assert 2 == P.Pipe(
            1, {'s'},   #write s == 1, same context
            lambda x: P.Pipe(
                x,
                P + 1, {'s'},   #write s == 2, same context
                create_ref_context=False
            ),
            's'   # read s == 2, same context
        )

    you can achieve what you want. Yet writing `create_ref_context=False` is a little cumbersome, so to make things nicer we just use a shortcut: an `N` at the beginning of the `NPipe` method name

        from phi import P

        assert 2 == P.Pipe(
            1, {'s'},   #write s == 1, same context
            lambda x: P.NPipe(
                x,
                P + 1, {'s'}   #write s == 2, same context
            ),
            's'   # read s == 2, same context
        )

    **Also see**

    * `phi.builder.Builder.Pipe`
    * `phi.builder.Builder.NMake`
    * `phi.builder.Builder.NRun`
    * [dsl](https://cgarciae.github.io/phi/dsl.m.html)
    * [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile)
    """
    return self.NMake(*code, **kwargs)(x)

def NRun(

self, *code, **kwargs)

NRun is a shortcut for Run(..., create_ref_context=False); its full name would be NoCreateRefContextRun, but that is impractically long.

Also see

  • phi.builder.Builder.Run
  • phi.builder.Builder.NMake
  • phi.builder.Builder.NPipe
  • dsl
  • Compile
def NRun(self, *code, **kwargs):
    """
    `NRun` is a shortcut for `Run(..., create_ref_context=False)`; its full name would be *NoCreateRefContextRun*, but that is impractically long.

    **Also see**

    * `phi.builder.Builder.Run`
    * `phi.builder.Builder.NMake`
    * `phi.builder.Builder.NPipe`
    * [dsl](https://cgarciae.github.io/phi/dsl.m.html)
    * [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile)
    """
    return self.NPipe(None, *code, **kwargs)

def Pipe(

self, x, *code, **kwargs)

Pipe is a method that takes an input argument plus an expression from the DSL; it compiles the expression and applies the resulting function to the input. It's highly inspired by Elixir's |> (pipe) operator.

Arguments

  • x: any input object
  • *code: any expression from the DSL. code is implicitly a tuple since that is what Python gives you when you declare a Variadic Function, therefore, according to the rules of the DSL, all expressions inside of code will be composed together. See Composition.
  • **kwargs: Pipe forwards all kwargs to phi.builder.Builder.Make, visit its documentation for more info.

Examples

from phi import P

def add1(x): return x + 1
def mul3(x): return x * 3

x = P.Pipe(
    1,     #input
    add1,  #1 + 1 == 2
    mul3   #2 * 3 == 6
)

assert x == 6

The previous example using lambdas to create the functions

from phi import P

x = P.Pipe(
    1,      #input
    P + 1,  #1 + 1 == 2
    P * 3   #2 * 3 == 6
)

assert x == 6

Also see

def Pipe(self, x, *code, **kwargs):
    """
    `Pipe` is a method that takes an input argument plus an expression from the DSL; it compiles the expression and applies the resulting function to the input. It's highly inspired by Elixir's [|> (pipe)](https://hexdocs.pm/elixir/Kernel.html#%7C%3E/2) operator.

    **Arguments**

    * **x**: any input object
    * ***code**: any expression from the DSL. `code` is implicitly a `tuple` since that is what Python gives you when you declare a [Variadic Function](https://docs.python.org/3/tutorial/controlflow.html#arbitrary-argument-lists), therefore, according to the rules of the DSL, all expressions inside of `code` will be composed together. See [Composition](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Composition).
    * ****kwargs**: `Pipe` forwards all `kwargs` to `phi.builder.Builder.Make`, visit its documentation for more info.

    **Examples**

        from phi import P

        def add1(x): return x + 1
        def mul3(x): return x * 3

        x = P.Pipe(
            1,     #input
            add1,  #1 + 1 == 2
            mul3   #2 * 3 == 6
        )

        assert x == 6

    The previous example using [lambdas](https://cgarciae.github.io/phi/lambdas.m.html) to create the functions

        from phi import P

        x = P.Pipe(
            1,      #input
            P + 1,  #1 + 1 == 2
            P * 3   #2 * 3 == 6
        )

        assert x == 6

    **Also see**

    * `phi.builder.Builder.Make`
    * `phi.builder.Builder.Run`
    * [dsl](https://cgarciae.github.io/phi/dsl.m.html)
    * [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile)
    * [lambdas](https://cgarciae.github.io/phi/lambdas.m.html)
    """
    return self.Make(*code, **kwargs)(x)

def Register0(

cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True, _return_type=None)

@classmethod
def Register0(cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None):
    def register_decorator(fn):
        cls.RegisterFunction0(fn, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=wrapped, explanation=explanation, method_type=method_type, explain=explain, _return_type=_return_type)
        return fn
    return register_decorator

def Register1(

cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True, _return_type=None)

@classmethod
def Register1(cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None):
    def register_decorator(fn):
        _wrapped = wrapped if wrapped else fn
        cls.RegisterFunction1(fn, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=_wrapped, explanation=explanation, method_type=method_type, explain=explain, _return_type=_return_type)
        return fn
    return register_decorator

def Register2(

cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True, _return_type=None)

@classmethod
def Register2(cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None):
    def register_decorator(fn):
        cls.RegisterFunction2(fn, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=wrapped, explanation=explanation, method_type=method_type, explain=explain, _return_type=_return_type)
        return fn
    return register_decorator

def Register3(

cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True, _return_type=None)

@classmethod
def Register3(cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None):
    def register_decorator(fn):
        cls.RegisterFunction3(fn, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=wrapped, explanation=explanation, method_type=method_type, explain=explain, _return_type=_return_type)
        return fn
    return register_decorator

def Register4(

cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True, _return_type=None)

@classmethod
def Register4(cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None):
    def register_decorator(fn):
        cls.RegisterFunction4(fn, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=wrapped, explanation=explanation, method_type=method_type, explain=explain, _return_type=_return_type)
        return fn
    return register_decorator

def Register5(

cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True, _return_type=None)

@classmethod
def Register5(cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None):
    def register_decorator(fn):
        cls.RegisterFunction5(fn, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=wrapped, explanation=explanation, method_type=method_type, explain=explain, _return_type=_return_type)
        return fn
    return register_decorator

def RegisterFunction0(

cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True, _return_type=None)

@classmethod
def RegisterFunction0(cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None):
    """
    """
    @functools.wraps(fn)
    def method(self, *args, **kwargs):
        kwargs['_return_type'] = _return_type
        return self.Then0(fn, *args, **kwargs)
    explanation = """
ver, a partial with the arguments is returned which expects any argument `x` and complete ignores it, such that
{3}.{0}(*args, **kwargs)
quivalent to
builder.{1}(*args, **kwargs)(x)
    """ + explanation if explain else ""
    cls.DoRegisterMethod(method, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=wrapped, explanation=explanation, method_type=method_type, explain=explain)

def RegisterFunction1(

cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True, _return_type=None)

This method enables you to register any function fn that takes an object as its first argument as a method of the Builder and Applicative class.

Arguments

  • fn: a function that at least takes an Object as its first argument.
  • library_path: the route of the library from which this function was taken, used for documentation purposes.
  • alias: allows you to specify the name of the method; it will take the name of the function if it's None.
  • doc: the documentation for the method; if None a predefined documentation will be generated based on the documentation of fn.

Return

None

Examples

@classmethod
def RegisterFunction1(cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None):
    """
    This method enables you to register any function `fn` that takes an object as its first argument as a method of the Builder and Applicative class.
    **Arguments**
    * `fn`: a function that atleast takes an Object as its first argument.
    * `library_path`: the route of the librar from which this function was taken, used for documentation purposes.
    * `alias`: allows you to specify the name of the method, it will take the name of the function if its `None`.
    * `doc`: the documentation for the method, if `None` a predefied documentation will be generated based on the documentation of `fn`.
    **Return**
    `None`
    **Examples**
    """
    @functools.wraps(fn)
    def method(self, *args, **kwargs):
        kwargs['_return_type'] = _return_type
        return self.Then(fn, *args, **kwargs)
    explanation = """
ver, the 1st argument is omitted, a partial with the rest of the arguments is returned which expects the 1st argument such that
{3}.{0}(x1, *args, **kwargs)
quivalent to
builder.{1}(*args, **kwargs)(x1)
    """ + explanation  if explain else ""
    cls.DoRegisterMethod(method, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=wrapped, explanation=explanation, method_type=method_type, explain=explain)

def RegisterFunction2(

cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True, _return_type=None)

@classmethod
def RegisterFunction2(cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None):
    """
    """
    @functools.wraps(fn)
    def method(self, *args, **kwargs):
        kwargs['_return_type'] = _return_type
        return self.Then2(fn, *args, **kwargs)
    explanation = """
ver, the 2nd argument is omitted, a partial with the rest of the arguments is returned which expects the 2nd argument such that
{3}.{0}(x1, x2, *args, **kwargs)
quivalent to
builder.{1}(x1, *args, **kwargs)(x2)
    """ + explanation if explain else ""
    cls.DoRegisterMethod(method, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=wrapped, explanation=explanation, method_type=method_type, explain=explain)

def RegisterFunction3(

cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True, _return_type=None)

@classmethod
def RegisterFunction3(cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None):
    """
    """
    @functools.wraps(fn)
    def method(self, *args, **kwargs):
        kwargs['_return_type'] = _return_type
        return self.Then3(fn, *args, **kwargs)
    explanation = """
ver, the 3rd argument is omitted, a partial with the rest of the arguments is returned which expects the 3rd argument such that
{3}.{0}(x1, x2, x3, *args, **kwargs)
quivalent to
builder.{1}(x1, x2, *args, **kwargs)(x3)
    """ + explanation if explain else ""
    cls.DoRegisterMethod(method, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=wrapped, explanation=explanation, method_type=method_type, explain=explain)

def RegisterFunction4(

cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True, _return_type=None)

@classmethod
def RegisterFunction4(cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None):
    """
    """
    @functools.wraps(fn)
    def method(self, *args, **kwargs):
        kwargs['_return_type'] = _return_type
        return self.Then4(fn, *args, **kwargs)
    explanation = """
ver, the 4th argument is omitted, a partial with the rest of the arguments is returned which expects the 4th argument such that
{3}.{0}(x1, x2, x3, x4, *args, **kwargs)
quivalent to
builder.{1}(x1, x2, x3, *args, **kwargs)(x4)
    """ + explanation if explain else ""
    cls.DoRegisterMethod(method, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=wrapped, explanation=explanation, method_type=method_type, explain=explain)

def RegisterFunction5(

cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True, _return_type=None)

@classmethod
def RegisterFunction5(cls, fn, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None):
    """
    """
    @functools.wraps(fn)
    def method(self, *args, **kwargs):
        kwargs['_return_type'] = _return_type
        return self.Then5(fn, *args, **kwargs)
    explanation = """
ver, the 5th argument is omitted, a partial with the rest of the arguments is returned which expects the 5th argument such that
{3}.{0}(x1, x2, x3, x4, x5, *args, **kwargs)
quivalent to
builder.{1}(x1, x2, x3, x4, *args, **kwargs)(x5)
    """ + explanation if explain else ""
    cls.DoRegisterMethod(method, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=wrapped, explanation=explanation, method_type=method_type, explain=explain, _return_type=_return_type)

def RegisterMethod(

cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation='', method_type=utils.identity, explain=True)

@classmethod
def RegisterMethod(cls, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True):
    def register_decorator(fn):
        cls.DoRegisterMethod(fn, library_path, alias=alias, original_name=original_name, doc=doc, wrapped=wrapped, explanation=explanation, method_type=method_type, explain=explain)
        return fn
    return register_decorator

def Run(

self, *code, **kwargs)

Run(*code, **kwargs) is equivalent to Pipe(None, *code, **kwargs); that is, it compiles the code and applies it to a None value.

Arguments

  • Same as phi.builder.Builder.Make.

Examples

You might create code that totally ignores its input argument e.g.

from phi import P

result = P.Pipe(
    None,
    dict(
        x = (
            Val(10),
            P + 1
        ),
        y = (
            Val(5),
            P * 5
        )
    )
)

assert result.x == 11
assert result.y == 25

Here the Val statement drops the None and introduces its own constants. Given this, it's more suitable to use Run

from phi import P

result = P.Run(
    dict(
        x = (
            Val(10),
            P + 1
        ),
        y = (
            Val(5),
            P * 5
        )
    )
)

assert result.x == 11
assert result.y == 25

Also see

  • phi.builder.Builder.Make
  • phi.builder.Builder.Pipe
  • dsl
  • Compile
def Run(self, *code, **kwargs):
    """
    `Run(*code, **kwargs)` is equivalent to `Pipe(None, *code, **kwargs)`; that is, it compiles the code and applies it to a `None` value.

    **Arguments**

    * Same as `phi.builder.Builder.Make`.

    **Examples**

    You might create code that totally ignores its input argument e.g.

        from phi import P

        result = P.Pipe(
            None,
            dict(
                x = (
                    Val(10),
                    P + 1
                ),
                y = (
                    Val(5),
                    P * 5
                )
            )
        )

        assert result.x == 11
        assert result.y == 25

    Here the `Val` statement drops the `None` and introduces its own constants. Given this, it's more suitable to use `Run`

        from phi import P

        result = P.Run(
            dict(
                x = (
                    Val(10),
                    P + 1
                ),
                y = (
                    Val(5),
                    P * 5
                )
            )
        )

        assert result.x == 11
        assert result.y == 25

    **Also see**

    * `phi.builder.Builder.Make`
    * `phi.builder.Builder.Pipe`
    * [dsl](https://cgarciae.github.io/phi/dsl.m.html)
    * [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile)
    """
    return self.Pipe(None, *code, **kwargs)

def Then(

self, expr, *args, **kwargs)

def Then(self, expr, *args, **kwargs):
    """
    """
    return self.ThenAt(0, expr, *args, **kwargs)

def Then0(

self, expr, *args, **kwargs)

def Then0(self, expr, *args, **kwargs):
    """
    """
    return self.ThenAt(-1, expr, *args, **kwargs)

def Then1(

self, expr, *args, **kwargs)

def Then(self, expr, *args, **kwargs):
    """
    """
    return self.ThenAt(0, expr, *args, **kwargs)

def Then2(

self, expr, arg1, *args, **kwargs)

def Then2(self, expr, arg1, *args, **kwargs):
    """
    """
    args = (arg1,) + args
    return self.ThenAt(1, expr, *args, **kwargs)

def Then3(

self, expr, arg1, arg2, *args, **kwargs)

def Then3(self, expr, arg1, arg2, *args, **kwargs):
    """
    """
    args = (arg1, arg2) + args
    return self.ThenAt(2, expr, *args, **kwargs)

def Then4(

self, expr, arg1, arg2, arg3, *args, **kwargs)

def Then4(self, expr, arg1, arg2, arg3, *args, **kwargs):
    """
    """
    args = (arg1, arg2, arg3) + args
    return self.ThenAt(3, expr, *args, **kwargs)

def Then5(

self, expr, arg1, arg2, arg3, arg4, *args, **kwargs)

def Then5(self, expr, arg1, arg2, arg3, arg4, *args, **kwargs):
    """
    """
    args = (arg1, arg2, arg3, arg4) + args
    return self.ThenAt(4, expr, *args, **kwargs)

def ThenAt(

self, n, expr, *args, **kwargs)

def ThenAt(self, n, expr, *args, **kwargs):
    _return_type = None
    if '_return_type' in kwargs:
        _return_type = kwargs['_return_type']
        del kwargs['_return_type']
    def _lambda(x):
        x = self(x)
        new_args = args[0:n] + (x,) + args[n:] if n >= 0 else args
        return expr(*new_args, **kwargs)
    return self.__unit__(_lambda, _return_type=_return_type)
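
A short sketch of how the index n positions the piped value among the arguments (plain Python callables, no TensorFlow involved):

from phi import P

# Then pipes the value as the 1st argument: f(x, 10); Then2 as the 2nd: f(10, x).
assert P.Then(lambda a, b: a - b, 10)(1) == -9   # 1 - 10
assert P.Then2(lambda a, b: a - b, 10)(1) == 9   # 10 - 1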

def Val(

self, x)

def Val(self, x):
    """
    """
    return self.__then__(lambda z: x)

def With(

self, *args, **kwargs)

With

def With(context_manager, *body):

Arguments

  • context_manager: a context manager object or valid expression from the DSL that returns a context manager.
  • *body: any valid expression of the DSL to be evaluated inside the context. *body is interpreted as a tuple so all expressions contained are composed.

As with normal Python programs, you sometimes might want to create a context for a block of code. You normally give a context manager to the with statement; in Phi you use P.With or phi.With

Context

Python's with statement returns a context object through the as keyword; in the DSL this object can be obtained using the P.Context method or the phi.Context function.

Examples

from phi import P, Obj, Context, With, Pipe

text = Pipe(
    "text.txt",
    With( open, Context,
        Obj.read()
    )
)

The previous is equivalent to

with open("text.txt") as f:
    text = f.read()
def With(self, *args, **kwargs):
    return self.NMake(dsl.With(*args, **kwargs))

def apply_regularization(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.apply_regularization(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.apply_regularization. However, the 1st argument is omitted, a partial with the rest of the arguments is returned which expects the 1st argument such that

tf.contrib.layers.apply_regularization(x1, *args, **kwargs)

is equivalent to

builder.apply_regularization(*args, **kwargs)(x1)

tf.contrib.layers.apply_regularization

Returns the summed penalty by applying `regularizer` to the `weights_list`.

Adding a regularization penalty over the layer weights and embedding weights can help prevent overfitting the training data. Regularization over layer biases is less common/useful, but assuming proper data preprocessing/mean subtraction, it usually shouldn't hurt much either.

Args:
  regularizer: A function that takes a single Tensor argument and returns a scalar Tensor output.
  weights_list: List of weights Tensors or Variables to apply regularizer over. Defaults to the GraphKeys.WEIGHTS collection if None.

Returns: A scalar representing the overall regularization penalty.

Raises: ValueError: If regularizer does not return a scalar output, or if we find no weights.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)
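
To make the generated equivalence above concrete, a hedged sketch (the weights list ws is hypothetical; the regularizer comes from tf.contrib.layers):

l2 = tf.contrib.layers.l2_regularizer(0.1)

# The piped value fills the omitted 1st argument (the regularizer), so
penalty = T.apply_regularization(weights_list=ws)(l2)
# should match tf.contrib.layers.apply_regularization(l2, weights_list=ws).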

def assert_summary_tag_unique(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.assert_summary_tag_unique(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.assert_summary_tag_unique. However, the 1st argument is omitted, a partial with the rest of the arguments is returned which expects the 1st argument such that

tf.contrib.layers.assert_summary_tag_unique(x1, *args, **kwargs)

is equivalent to

builder.assert_summary_tag_unique(*args, **kwargs)(x1)

tf.contrib.layers.assert_summary_tag_unique

None
@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def avg_pool2d(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.avg_pool2d(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.avg_pool2d. However, the 1st argument is omitted, a partial with the rest of the arguments is returned which expects the 1st argument such that

tf.contrib.layers.avg_pool2d(x1, *args, **kwargs)

is equivalent to

builder.avg_pool2d(*args, **kwargs)(x1)

tf.contrib.layers.avg_pool2d

Adds a 2D average pooling op.

It is assumed that the pooling is done per image but not in batch or channels.

Args:
  inputs: A Tensor of size [batch_size, height, width, channels].
  kernel_size: A list of length 2: [kernel_height, kernel_width] of the pooling kernel over which the op is computed. Can be an int if both values are the same.
  stride: A list of length 2: [stride_height, stride_width]. Can be an int if both strides are the same. Note that presently both strides must have the same value.
  padding: The padding method, either 'VALID' or 'SAME'.
  outputs_collections: The collections to which the outputs are added.
  scope: Optional scope for name_scope.

Returns: A Tensor representing the results of the pooling operation.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def batch_norm(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.batch_norm(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.batch_norm. However, the 1st argument is omitted, a partial with the rest of the arguments is returned which expects the 1st argument such that

tf.contrib.layers.batch_norm(x1, *args, **kwargs)

is equivalent to

builder.batch_norm(*args, **kwargs)(x1)

tf.contrib.layers.batch_norm

Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.

"Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift"

Sergey Ioffe, Christian Szegedy

Can be used as a normalizer function for conv2d and fully_connected.

Note: When is_training is True the moving_mean and moving_variance need to be updated, by default the update_ops are placed in tf.GraphKeys.UPDATE_OPS so they need to be added as a dependency to the train_op, example:

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops:
    updates = tf.group(*update_ops)
    total_loss = control_flow_ops.with_dependencies([updates], total_loss)

One can set update_collections=None to force the updates in place, but that can have a speed penalty, especially in distributed settings.

Args:
  inputs: a tensor with 2 or more dimensions, where the first dimension has batch_size. The normalization is over all but the last dimension.
  decay: decay for the moving average.
  center: If True, subtract beta. If False, beta is ignored.
  scale: If True, multiply by gamma. If False, gamma is not used. When the next layer is linear (also e.g. nn.relu), this can be disabled since the scaling can be done by the next layer.
  epsilon: small float added to variance to avoid dividing by zero.
  activation_fn: activation function, default set to None to skip it and maintain a linear activation.
  updates_collections: collections to collect the update ops for computation. The updates_ops need to be executed with the train_op. If None, a control dependency would be added to make sure the updates are computed in place.
  is_training: whether or not the layer is in training mode. In training mode it would accumulate the statistics of the moments into moving_mean and moving_variance using an exponential moving average with the given decay. When it is not in training mode then it would use the values of the moving_mean and the moving_variance.
  reuse: whether or not the layer and its variables should be reused. To be able to reuse the layer scope must be given.
  variables_collections: optional collections for the variables.
  outputs_collections: collections to add the outputs.
  trainable: If True also add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES (see tf.Variable).
  scope: Optional scope for variable_scope.

Returns: A Tensor representing the output of the operation.

Raises: ValueError: if rank or last dimension of inputs is undefined.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def bias_add(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.bias_add(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.bias_add. However, the 1st argument is omitted, a partial with the rest of the arguments is returned which expects the 1st argument such that

tf.contrib.layers.bias_add(x1, *args, **kwargs)

is equivalent to

builder.bias_add(*args, **kwargs)(x1)

tf.contrib.layers.bias_add

Adds a bias to the inputs.

Can be used as a normalizer function for conv2d and fully_connected.

Args:
  inputs: a tensor with at least rank 2 and value for the last dimension, e.g. [batch_size, depth], [None, None, None, depth].
  activation_fn: activation function, default set to None to skip it and maintain a linear activation.
  initializer: An initializer for the bias, defaults to 0.
  regularizer: A regularizer like the result of l1_regularizer or l2_regularizer.
  reuse: whether or not the layer and its variables should be reused. To be able to reuse the layer scope must be given.
  variables_collections: optional collections for the variables.
  outputs_collections: collections to add the outputs.
  trainable: If True also add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES (see tf.Variable).
  scope: Optional scope for variable_scope.

Returns: a tensor representing the result of adding biases to the inputs.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def binary_svm_target(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.binary_svm_target(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.binary_svm_target. However, the 1st argument is omitted, a partial with the rest of the arguments is returned which expects the 1st argument such that

tf.contrib.layers.binary_svm_target(x1, *args, **kwargs)

is equivalent to

builder.binary_svm_target(*args, **kwargs)(x1)

tf.contrib.layers.binary_svm_target

Creates a _TargetColumn for binary classification with SVMs.

The target column uses binary hinge loss.

Args:
  label_name: String, name of the key in label dict. Can be null if label is a tensor (single headed models).
  weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example.

Returns: An instance of _TargetColumn.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def bucketize(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.bucketize(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.bucketize. However, the 1st argument is omitted, a partial with the rest of the arguments is returned which expects the 1st argument such that

tf.contrib.layers.bucketize(x1, *args, **kwargs)

is equivalent to

builder.bucketize(*args, **kwargs)(x1)

tf.contrib.layers.bucketize

Bucketizes input_tensor by given boundaries.

See bucketize_op.cc for more details.

Args:
  input_tensor: A Tensor which will be bucketized.
  boundaries: A list of floats giving the boundaries. It has to be sorted.
  name: A name prefix for the returned tensors (optional).

Returns: A Tensor with type int32 which indicates the corresponding bucket for each value in input_tensor.

Raises: TypeError: If boundaries is not a list.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def bucketized_column(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.bucketized_column(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.bucketized_column. However, the 1st argument is omitted, a partial with the rest of the arguments is returned which expects the 1st argument such that

tf.contrib.layers.bucketized_column(x1, *args, **kwargs)

is equivalent to

builder.bucketized_column(*args, **kwargs)(x1)

tf.contrib.layers.bucketized_column

Creates a _BucketizedColumn.

Args:
  source_column: A _RealValuedColumn defining dense column.
  boundaries: A list of floats specifying the boundaries. It has to be sorted.

Returns: A _BucketizedColumn.

Raises: ValueError: if 'boundaries' is empty or not sorted.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def check_feature_columns(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.check_feature_columns(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.check_feature_columns. However, the 1st argument is omitted, a partial with the rest of the arguments is returned which expects the 1st argument such that

tf.contrib.layers.check_feature_columns(x1, *args, **kwargs)

is equivalent to

builder.check_feature_columns(*args, **kwargs)(x1)

tf.contrib.layers.check_feature_columns

Checks the validity of the set of FeatureColumns.

Args: feature_columns: A set of instances or subclasses of FeatureColumn.

Raises: ValueError: If there are duplicate feature column keys.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def convolution2d(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.convolution2d(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.convolution2d. However, the 1st argument is omitted, a partial with the rest of the arguments is returned which expects the 1st argument such that

tf.contrib.layers.convolution2d(x1, *args, **kwargs)

is equivalent to

builder.convolution2d(*args, **kwargs)(x1)

tf.contrib.layers.convolution2d

Adds a 2D convolution followed by an optional batch_norm layer.

convolution2d creates a variable called weights, representing the convolutional kernel, that is convolved with the inputs to produce a Tensor of activations. If a normalizer_fn is provided (such as batch_norm), it is then applied. Otherwise, if normalizer_fn is None and a biases_initializer is provided then a biases variable would be created and added to the activations. Finally, if activation_fn is not None, it is applied to the activations as well.

Performs atrous convolution with input stride equal to rate if rate is greater than one.

Args:
  inputs: a 4-D tensor [batch_size, height, width, channels].
  num_outputs: integer, the number of output filters.
  kernel_size: a list of length 2 [kernel_height, kernel_width] of the filters. Can be an int if both values are the same.
  stride: a list of length 2 [stride_height, stride_width]. Can be an int if both strides are the same. Note that presently both strides must have the same value.
  padding: one of VALID or SAME.
  rate: integer. If less than or equal to 1, a standard convolution is used. If greater than 1, the atrous convolution is applied and stride must be set to 1.
  activation_fn: activation function, set to None to skip it and maintain a linear activation.
  normalizer_fn: normalization function to use instead of biases. If normalizer_fn is provided then biases_initializer and biases_regularizer are ignored and biases are not created nor added. Default set to None for no normalizer function.
  normalizer_params: normalization function parameters.
  weights_initializer: An initializer for the weights.
  weights_regularizer: Optional regularizer for the weights.
  biases_initializer: An initializer for the biases. If None skip biases.
  biases_regularizer: Optional regularizer for the biases.
  reuse: whether or not the layer and its variables should be reused. To be able to reuse the layer scope must be given.
  variables_collections: optional list of collections for all the variables or a dictionary containing a different list of collections per variable.
  outputs_collections: collection to add the outputs.
  trainable: If True also add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES (see tf.Variable).
  scope: Optional scope for variable_scope.

Returns: a tensor representing the output of the operation.

Raises: ValueError: if both 'rate' and stride are larger than one.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)
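
To make the equivalence above concrete, a minimal sketch of the builder form (input shape and filter counts are made-up values; assumes tf.contrib.layers is available and a TensorBuilder instance named builder):

```python
import tensorflow as tf
from tensorbuilder import TensorBuilder

builder = TensorBuilder()
x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])  # hypothetical input

# Partial over (num_outputs, kernel_size, ...); the input tensor comes last.
conv = builder.convolution2d(32, [3, 3], padding='SAME')
y = conv(x)  # same as tf.contrib.layers.convolution2d(x, 32, [3, 3], padding='SAME')
```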

def convolution2d_in_plane(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.convolution2d_in_plane(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.convolution2d_in_plane. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.convolution2d_in_plane(x1, *args, **kwargs)

is equivalent to

builder.convolution2d_in_plane(*args, **kwargs)(x1)

tf.contrib.layers.convolution2d_in_plane

Performs the same in-plane convolution to each channel independently.

This is useful for performing various simple channel-independent convolution operations such as image gradients:

image = tf.constant(..., shape=(16, 240, 320, 3))
vert_gradients = layers.conv2d_in_plane(image, kernel=[1, -1], kernel_size=[2, 1])
horz_gradients = layers.conv2d_in_plane(image, kernel=[1, -1], kernel_size=[1, 2])

Args: inputs: a 4-D tensor with dimensions [batch_size, height, width, channels]. kernel_size: a list of length 2 holding the [kernel_height, kernel_width] of the pooling. Can be an int if both values are the same. stride: a list of length 2 [stride_height, stride_width]. Can be an int if both strides are the same. Note that presently both strides must have the same value. padding: the padding type to use, either 'SAME' or 'VALID'. activation_fn: activation function, set to None to skip it and maintain a linear activation. normalizer_fn: normalization function to use instead of biases. If normalizer_fn is provided then biases_initializer and biases_regularizer are ignored and biases are not created nor added. Default is set to None for no normalizer function. normalizer_params: normalization function parameters. weights_initializer: An initializer for the weights. weights_regularizer: Optional regularizer for the weights. biases_initializer: An initializer for the biases. If None skip biases. biases_regularizer: Optional regularizer for the biases. reuse: whether or not the layer and its variables should be reused. To be able to reuse the layer, scope must be given. variables_collections: optional list of collections for all the variables or a dictionary containing a different list of collections per variable. outputs_collections: collection to add the outputs. trainable: If True also add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES (see tf.Variable). scope: Optional scope for variable_scope.

Returns: A Tensor representing the output of the operation.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def convolution2d_transpose(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.convolution2d_transpose(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.convolution2d_transpose. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.convolution2d_transpose(x1, *args, **kwargs)

is equivalent to

builder.convolution2d_transpose(*args, **kwargs)(x1)

tf.contrib.layers.convolution2d_transpose

Adds a convolution2d_transpose with an optional batch normalization layer.

The function creates a variable called weights, representing the kernel, that is convolved with the input. If batch_norm_params is None, a second variable called 'biases' is added to the result of the operation.

Args: inputs: a tensor of size [batch_size, height, width, channels]. num_outputs: integer, the number of output filters. kernel_size: a list of length 2 holding the [kernel_height, kernel_width] of the filters. Can be an int if both values are the same. stride: a list of length 2: [stride_height, stride_width]. Can be an int if both strides are the same. Note that presently both strides must have the same value. padding: one of 'VALID' or 'SAME'. activation_fn: activation function, set to None to skip it and maintain a linear activation. normalizer_fn: normalization function to use instead of biases. If normalizer_fn is provided then biases_initializer and biases_regularizer are ignored and biases are not created nor added. Default is set to None for no normalizer function. normalizer_params: normalization function parameters. weights_initializer: An initializer for the weights. weights_regularizer: Optional regularizer for the weights. biases_initializer: An initializer for the biases. If None skip biases. biases_regularizer: Optional regularizer for the biases. reuse: whether or not the layer and its variables should be reused. To be able to reuse the layer, scope must be given. variables_collections: optional list of collections for all the variables or a dictionary containing a different list of collections per variable. outputs_collections: collection to add the outputs. trainable: whether or not the variables should be trainable. scope: Optional scope for variable_scope.

Returns: a tensor representing the output of the operation.

Raises: ValueError: if 'kernel_size' is not a list of length 2.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def create_feature_spec_for_parsing(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.create_feature_spec_for_parsing(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.create_feature_spec_for_parsing. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.create_feature_spec_for_parsing(x1, *args, **kwargs)

is equivalent to

builder.create_feature_spec_for_parsing(*args, **kwargs)(x1)

tf.contrib.layers.create_feature_spec_for_parsing

Helper that prepares features config from input feature_columns.

The returned feature config can be used as arg 'features' in tf.parse_example.

Typical usage example:

```python
# Define features and transformations
country = sparse_column_with_vocabulary_file("country", VOCAB_FILE)
age = real_valued_column("age")
click_bucket = bucketized_column(
    real_valued_column("historical_click_ratio"),
    boundaries=[i/10. for i in range(10)])
country_x_click = crossed_column([country, click_bucket], 10)

feature_columns = set([age, click_bucket, country_x_click])
batch_examples = tf.parse_example(
    serialized_examples,
    create_feature_spec_for_parsing(feature_columns))
```

For the above example, create_feature_spec_for_parsing would return the dict:

{"age": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
 "historical_click_ratio": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
 "country": parsing_ops.VarLenFeature(tf.string)}

Args: feature_columns: An iterable containing all the feature columns. All items should be instances of classes derived from _FeatureColumn.

Returns: A dict mapping feature keys to FixedLenFeature or VarLenFeature values.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def crossed_column(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.crossed_column(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.crossed_column. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.crossed_column(x1, *args, **kwargs)

is equivalent to

builder.crossed_column(*args, **kwargs)(x1)

tf.contrib.layers.crossed_column

Creates a _CrossedColumn.

Args: columns: An iterable of _FeatureColumn. Items can be an instance of _SparseColumn, _CrossedColumn, or _BucketizedColumn. hash_bucket_size: An int that is > 1. The number of buckets. combiner: A combiner string, supports sum, mean, sqrtn. ckpt_to_load_from: (Optional). String representing checkpoint name/pattern to restore the column weights. Required if tensor_name_in_ckpt is not None. tensor_name_in_ckpt: (Optional). Name of the Tensor in the provided checkpoint from which to restore the column weights. Required if ckpt_to_load_from is not None.

Returns: A _CrossedColumn.

Raises: TypeError: if any item in columns is not an instance of _SparseColumn, _CrossedColumn, or _BucketizedColumn, or hash_bucket_size is not an int. ValueError: if hash_bucket_size is not > 1 or len(columns) is not > 1.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def dropout(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.dropout(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.dropout. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.dropout(x1, *args, **kwargs)

is equivalent to

builder.dropout(*args, **kwargs)(x1)

tf.contrib.layers.dropout

Returns a dropout op applied to the input.

With probability keep_prob, outputs the input element scaled up by 1 / keep_prob, otherwise outputs 0. The scaling is so that the expected sum is unchanged.

Args: inputs: the tensor to pass to the nn.dropout op. keep_prob: A scalar Tensor with the same type as x. The probability that each element is kept. noise_shape: A 1-D Tensor of type int32, representing the shape for randomly generated keep/drop flags. is_training: A bool Tensor indicating whether or not the model is in training mode. If so, dropout is applied and values scaled. Otherwise, inputs is returned. outputs_collections: collection to add the outputs. scope: Optional scope for name_scope.

Returns: a tensor representing the output of the operation.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)
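
A minimal sketch of the builder form under the same assumptions as the earlier examples (the keep_prob value is arbitrary):

```python
import tensorflow as tf
from tensorbuilder import TensorBuilder

builder = TensorBuilder()
x = tf.placeholder(tf.float32, shape=[None, 128])

drop = builder.dropout(keep_prob=0.5, is_training=True)
y = drop(x)  # same as tf.contrib.layers.dropout(x, keep_prob=0.5, is_training=True)
```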

def embedding_column(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.embedding_column(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.embedding_column. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.embedding_column(x1, *args, **kwargs)

is equivalent to

builder.embedding_column(*args, **kwargs)(x1)

tf.contrib.layers.embedding_column

Creates an `_EmbeddingColumn`.

Args: sparse_id_column: A _SparseColumn which is created by, for example, the sparse_column_with_* or crossed_column functions. Note that combiner defined in sparse_id_column is ignored. dimension: An integer specifying dimension of the embedding. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each of these can be considered an example-level normalization on the column: "sum": do not normalize; "mean": do l1 normalization; "sqrtn": do l2 normalization. For more information see tf.embedding_lookup_sparse. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to tf.truncated_normal_initializer with mean 0.0 and standard deviation 1/sqrt(sparse_id_column.length). ckpt_to_load_from: (Optional). String representing checkpoint name/pattern to restore the column weights. Required if tensor_name_in_ckpt is not None. tensor_name_in_ckpt: (Optional). Name of the Tensor in the provided checkpoint from which to restore the column weights. Required if ckpt_to_load_from is not None.

Returns: An _EmbeddingColumn.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def flatten(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.flatten(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.flatten. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.flatten(x1, *args, **kwargs)

is equivalent to

builder.flatten(*args, **kwargs)(x1)

tf.contrib.layers.flatten

Flattens the input while maintaining the batch_size.

Assumes that the first dimension represents the batch.

Args: inputs: a tensor of size [batch_size, ...]. outputs_collections: collection to add the outputs. scope: Optional scope for name_scope.

Returns: a flattened tensor with shape [batch_size, k].

Raises: ValueError: if inputs.shape is wrong.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)
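
For instance, flatten is typically used to bridge a convolutional stack and a dense layer; a minimal sketch under the same assumptions as the earlier examples:

```python
import tensorflow as tf
from tensorbuilder import TensorBuilder

builder = TensorBuilder()
images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])

flat = builder.flatten()(images)  # shape becomes [batch_size, 784]
```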

def fully_connected(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.fully_connected(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.fully_connected. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.fully_connected(x1, *args, **kwargs)

is equivalent to

builder.fully_connected(*args, **kwargs)(x1)

tf.contrib.layers.fully_connected

Adds a fully connected layer.

fully_connected creates a variable called weights, representing a fully connected weight matrix, which is multiplied by the inputs to produce a Tensor of hidden units. If a normalizer_fn is provided (such as batch_norm), it is then applied. Otherwise, if normalizer_fn is None and a biases_initializer is provided, then a biases variable is created and added to the hidden units. Finally, if activation_fn is not None, it is applied to the hidden units as well.

Note: if inputs has rank greater than 2, it is flattened prior to the initial matrix multiply with weights.

Args: inputs: A tensor with at least rank 2 and a known value for the last dimension, i.e. [batch_size, depth] or [None, None, None, channels]. num_outputs: Integer or long, the number of output units in the layer. activation_fn: activation function, set to None to skip it and maintain a linear activation. normalizer_fn: normalization function to use instead of biases. If normalizer_fn is provided then biases_initializer and biases_regularizer are ignored and biases are not created nor added. Default is set to None for no normalizer function. normalizer_params: normalization function parameters. weights_initializer: An initializer for the weights. weights_regularizer: Optional regularizer for the weights. biases_initializer: An initializer for the biases. If None skip biases. biases_regularizer: Optional regularizer for the biases. reuse: whether or not the layer and its variables should be reused. To be able to reuse the layer, scope must be given. variables_collections: Optional list of collections for all the variables or a dictionary containing a different list of collections per variable. outputs_collections: collection to add the outputs. trainable: If True also add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES (see tf.Variable). scope: Optional scope for variable_scope.

Returns: the tensor variable representing the result of the series of operations.

Raises: ValueError: if x has rank less than 2 or if its last dimension is not set.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)
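
A minimal sketch chaining two dense layers in the builder form (layer sizes are made-up values; same assumptions as the earlier examples):

```python
import tensorflow as tf
from tensorbuilder import TensorBuilder

builder = TensorBuilder()
x = tf.placeholder(tf.float32, shape=[None, 784])

h = builder.fully_connected(128, activation_fn=tf.nn.relu)(x)
logits = builder.fully_connected(10, activation_fn=None)(h)  # linear output layer
```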

def get_default_binary_metrics_for_eval(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.get_default_binary_metrics_for_eval(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.get_default_binary_metrics_for_eval. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.get_default_binary_metrics_for_eval(x1, *args, **kwargs)

is equivalent to

builder.get_default_binary_metrics_for_eval(*args, **kwargs)(x1)

tf.contrib.layers.get_default_binary_metrics_for_eval

Returns a dictionary of basic metrics for logistic regression.

Args: thresholds: List of floating point thresholds to use for accuracy, precision, and recall metrics. If None, defaults to [0.5].

Returns: Dictionary mapping metrics string names to metrics functions.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def hashed_embedding_column(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.hashed_embedding_column(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.hashed_embedding_column. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.hashed_embedding_column(x1, *args, **kwargs)

is equivalent to

builder.hashed_embedding_column(*args, **kwargs)(x1)

tf.contrib.layers.hashed_embedding_column

Creates an embedding column of a sparse feature using parameter hashing.

The i-th embedding component of a value v is found by retrieving an embedding weight whose index is a fingerprint of the pair (v,i).

Args: column_name: A string defining sparse column name. size: An integer specifying the number of parameters in the embedding layer. dimension: An integer specifying dimension of the embedding. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each of these can be thought of as an example-level normalization on the column: "sum": do not normalize features in the column; "mean": do l1 normalization on features in the column; "sqrtn": do l2 normalization on features in the column. For more information see tf.embedding_lookup_sparse. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to tf.truncated_normal_initializer with mean 0 and standard deviation 0.1.

Returns: A _HashedEmbeddingColumn.

Raises: ValueError: if dimension or size is not a positive integer; or if combiner is not supported.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def hashed_embedding_lookup(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.hashed_embedding_lookup(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.hashed_embedding_lookup. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.hashed_embedding_lookup(x1, *args, **kwargs)

is equivalent to

builder.hashed_embedding_lookup(*args, **kwargs)(x1)

tf.contrib.layers.hashed_embedding_lookup

Looks up embeddings using parameter hashing for each value in `values`.

The i-th embedding component of a value v in values is found by retrieving the weight whose index is a fingerprint of the pair (v,i). The concept is explored as "feature hashing" for model compression in this paper: http://arxiv.org/pdf/1504.04788.pdf

Feature hashing has the pleasant effect of allowing us to compute an embedding without needing a pre-determined vocabulary, relieving some amount of process complexity. It also allows for us to maintain embeddings for possibly trillions of features with a fixed amount of memory.

Note that this is superior to out-of-vocabulary shared "hash buckets" in that the embedding is extremely likely to be unique for each token as opposed to being shared across probably-colliding tokens. The price is that we must compute a hash once for each scalar in the token's embedding as opposed to once per token.

If params is a list, it represents a partition of the embedding parameters. Each tensor in the list should have the same length, except for the first ones which may have an additional element. For instance 10 parameters can be partitioned in 4 tensors with length [3, 3, 2, 2].

Args: params: A Tensor or list of Tensors. Each tensor must be of rank 1 with fully-defined shape. values: Tensor of values to be embedded. dimension: Embedding dimension. name: An optional name for this op.

Returns: A tensor with shape [d0, ..., dn, dimension] with shape(values) = [d0, ..., dn]

Raises: ValueError: if dimension is not positive or the partition size is invalid.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def hashed_embedding_lookup_sparse(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.hashed_embedding_lookup_sparse(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.hashed_embedding_lookup_sparse. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.hashed_embedding_lookup_sparse(x1, *args, **kwargs)

is equivalent to

builder.hashed_embedding_lookup_sparse(*args, **kwargs)(x1)

tf.contrib.layers.hashed_embedding_lookup_sparse

Looks up embeddings of a sparse feature using parameter hashing.

See tf.contrib.layers.hashed_embedding_lookup for embedding with hashing.

Args: params: A Tensor or list of Tensors. Each tensor must be of rank 1 with fully-defined shape. sparse_values: A 2-D SparseTensor containing the values to be embedded. Some rows may be empty. dimension: Embedding dimension. combiner: A string specifying how to combine embedding results for each entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the default. default_value: The value to use for an entry with no features. name: An optional name for this op.

Returns: Dense tensor with shape [N, dimension] with N the number of rows in sparse_values.

Raises: TypeError: If sparse_values is not a SparseTensor. ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def infer_real_valued_columns(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.infer_real_valued_columns(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.infer_real_valued_columns. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.infer_real_valued_columns(x1, *args, **kwargs)

is equivalent to

builder.infer_real_valued_columns(*args, **kwargs)(x1)

tf.contrib.layers.infer_real_valued_columns

(tf.contrib.layers.infer_real_valued_columns provides no docstring.)

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def input_from_feature_columns(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.input_from_feature_columns(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.input_from_feature_columns. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.input_from_feature_columns(x1, *args, **kwargs)

is equivalent to

builder.input_from_feature_columns(*args, **kwargs)(x1)

tf.contrib.layers.input_from_feature_columns

A tf.contrib.layer style input layer builder based on FeatureColumns.

Generally a single example in training data is described with feature columns. At the first layer of the model, this column oriented data should be converted to a single tensor. Each feature column needs a different kind of operation during this conversion. For example sparse features need a totally different handling than continuous features.

An example usage of input_from_feature_columns is as follows:

# Building model for training
columns_to_tensor = tf.parse_example(...)
first_layer = input_from_feature_columns(
    columns_to_tensors=columns_to_tensor,
    feature_columns=feature_columns)
second_layer = fully_connected(first_layer, ...)
...

where feature_columns can be defined as follows:

occupation = sparse_column_with_hash_bucket(column_name="occupation",
                                            hash_bucket_size=1000)
occupation_emb = embedding_column(sparse_id_column=occupation,
                                  dimension=16, combiner="sum")
age = real_valued_column("age")
age_buckets = bucketized_column(
    source_column=age,
    boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
occupation_x_age = crossed_column(columns=[occupation, age_buckets],
                                  hash_bucket_size=10000)

feature_columns = [occupation_emb, occupation_x_age]

Args: columns_to_tensors: A mapping from feature column to tensors. 'string' key means a base feature (not-transformed). It can have FeatureColumn as a key too. That means that FeatureColumn is already transformed by the input pipeline. For example, inflow may have handled transformations. feature_columns: A set containing all the feature columns. All items in the set should be instances of classes derived from FeatureColumn. weight_collections: List of graph collections to which weights are added. trainable: If True also add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES (see tf.Variable). scope: Optional scope for variable_scope.

Returns: A Tensor which can be consumed by hidden layers in the neural network.

Raises: ValueError: if FeatureColumn cannot be consumed by a neural network.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def is_summary_tag_unique(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.is_summary_tag_unique(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.is_summary_tag_unique. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.is_summary_tag_unique(x1, *args, **kwargs)

is equivalent to

builder.is_summary_tag_unique(*args, **kwargs)(x1)

tf.contrib.layers.is_summary_tag_unique

Checks if a summary tag is unique.

Args: tag: The tag to use

Returns: True if the summary tag is unique.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def joint_weighted_sum_from_feature_columns(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.joint_weighted_sum_from_feature_columns(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.joint_weighted_sum_from_feature_columns. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.joint_weighted_sum_from_feature_columns(x1, *args, **kwargs)

is equivalent to

builder.joint_weighted_sum_from_feature_columns(*args, **kwargs)(x1)

tf.contrib.layers.joint_weighted_sum_from_feature_columns

A restricted linear prediction builder based on FeatureColumns.

As long as all feature columns are unweighted sparse columns, this computes the prediction of a linear model which stores all weights in a single variable.

Args: columns_to_tensors: A mapping from feature column to tensors. 'string' key means a base feature (not-transformed). It can have FeatureColumn as a key too. That means that FeatureColumn is already transformed by input pipeline. For example, inflow may have handled transformations. feature_columns: A set containing all the feature columns. All items in the set should be instances of classes derived from FeatureColumn. num_outputs: An integer specifying number of outputs. Default value is 1. weight_collections: List of graph collections to which weights are added. trainable: If True also add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES (see tf.Variable). scope: Optional scope for variable_scope.

Returns: A tuple of the following: * A Tensor which represents predictions of a linear model. * A list of Variables storing the weights. * A Variable which is used for bias.

Raises: ValueError: if FeatureColumn cannot be used for linear predictions.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def l1_l2_regularizer(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.l1_l2_regularizer(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.l1_l2_regularizer. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.l1_l2_regularizer(x1, *args, **kwargs)

is equivalent to

builder.l1_l2_regularizer(*args, **kwargs)(x1)

tf.contrib.layers.l1_l2_regularizer

Returns a function that can be used to apply L1 and L2 regularization.

Args: scale_l1: A scalar multiplier Tensor for L1 regularization. scale_l2: A scalar multiplier Tensor for L2 regularization. scope: An optional scope name.

Returns: A function with signature l1_l2(weights) that applies a weighted sum of L1 and L2 regularization.

Raises: ValueError: If scale is negative or if scale is not a float.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def l1_regularizer(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.l1_regularizer(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.l1_regularizer. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.l1_regularizer(x1, *args, **kwargs)

is equivalent to

builder.l1_regularizer(*args, **kwargs)(x1)

tf.contrib.layers.l1_regularizer

Returns a function that can be used to apply L1 regularization to weights.

L1 regularization encourages sparsity.

Args: scale: A scalar multiplier Tensor. 0.0 disables the regularizer. scope: An optional scope name.

Returns: A function with signature l1(weights) that applies L1 regularization.

Raises: ValueError: If scale is negative or if scale is not a float.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def l2_regularizer(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.l2_regularizer(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.l2_regularizer. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.l2_regularizer(x1, *args, **kwargs)

is equivalent to

builder.l2_regularizer(*args, **kwargs)(x1)

tf.contrib.layers.l2_regularizer

Returns a function that can be used to apply L2 regularization to weights.

Small values of L2 can help prevent overfitting the training data.

Args: scale: A scalar multiplier Tensor. 0.0 disables the regularizer. scope: An optional scope name.

Returns: A function with signature l2(weights) that applies L2 regularization.

Raises: ValueError: If scale is negative or if scale is not a float.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)
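
Because the returned value is itself a function over weights, its usual role is as the weights_regularizer of a layer. A minimal sketch of direct tf.contrib.layers usage (the scale is an arbitrary value):

```python
import tensorflow as tf
from tensorflow.contrib import layers

reg = layers.l2_regularizer(scale=1e-4)
x = tf.placeholder(tf.float32, shape=[None, 64])

# The weighted L2 term ends up in the GraphKeys.REGULARIZATION_LOSSES collection.
h = layers.fully_connected(x, 32, weights_regularizer=reg)
```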

def layer_norm(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.layer_norm(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.layer_norm. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.layer_norm(x1, *args, **kwargs)

is equivalent to

builder.layer_norm(*args, **kwargs)(x1)

tf.contrib.layers.layer_norm

Adds a Layer Normalization layer from https://arxiv.org/abs/1607.06450.

"Layer Normalization"

Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton

Can be used as a normalizer function for conv2d and fully_connected.

Args: inputs: a tensor with 2 or more dimensions. The normalization occurs over all but the first dimension. center: If True, subtract beta. If False, beta is ignored. scale: If True, multiply by gamma. If False, gamma is not used. When the next layer is linear (also e.g. nn.relu), this can be disabled since the scaling can be done by the next layer. activation_fn: activation function, default set to None to skip it and maintain a linear activation. reuse: whether or not the layer and its variables should be reused. To be able to reuse the layer scope must be given. variables_collections: optional collections for the variables. outputs_collections: collections to add the outputs. trainable: If True also add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES (see tf.Variable). scope: Optional scope for variable_op_scope.

Returns: A Tensor representing the output of the operation.

Raises: ValueError: if rank or last dimension of inputs is undefined.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)
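
As the docstring notes, layer_norm can serve as the normalizer function of conv2d or fully_connected; a minimal sketch of that usage:

```python
import tensorflow as tf
from tensorflow.contrib import layers

x = tf.placeholder(tf.float32, shape=[None, 64])

# When a normalizer_fn is given, biases_initializer/biases_regularizer are ignored.
h = layers.fully_connected(x, 32, normalizer_fn=layers.layer_norm)
```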

def legacy_fully_connected(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.legacy_fully_connected(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.legacy_fully_connected. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.legacy_fully_connected(x1, *args, **kwargs)

is equivalent to

builder.legacy_fully_connected(*args, **kwargs)(x1)

tf.contrib.layers.legacy_fully_connected

Adds the parameters for a fully connected layer and returns the output.

A fully connected layer is generally defined as a matrix multiply: y = f(w * x + b) where f is given by activation_fn. If activation_fn is None, the result of y = w * x + b is returned.

If x has shape [\(\text{dim}_0, \text{dim}_1, ..., \text{dim}_n\)] with more than 2 dimensions (\(n > 1\)), then we repeat the matrix multiply along the first dimensions. The result r is a tensor of shape [\(\text{dim}_0, ..., \text{dim}_{n-1},\) num_output_units], where \( r_{i_0, ..., i_{n-1}, k} = \sum_{0 \leq j < \text{dim}_n} x_{i_0, ..., i_{n-1}, j} \cdot w_{j, k} \). This is accomplished by reshaping x to 2-D [\(\text{dim}_0 \cdot ... \cdot \text{dim}_{n-1}, \text{dim}_n\)] before the matrix multiply and afterwards reshaping it to [\(\text{dim}_0, ..., \text{dim}_{n-1},\) num_output_units].

This op creates w and optionally b. Bias (b) can be disabled by setting bias_init to None.

The variable creation is compatible with tf.variable_scope and so can be reused with tf.variable_scope or tf.make_template.

Most of the details of variable creation can be controlled by specifying the initializers (weight_init and bias_init) and in which collections to place the created variables (weight_collections and bias_collections; note that the variables are always added to the VARIABLES collection). The output of the layer can be placed in custom collections using output_collections. The collections arguments default to WEIGHTS, BIASES and ACTIVATIONS, respectively.

A per layer regularization can be specified by setting weight_regularizer and bias_regularizer, which are applied to the weights and biases respectively, and whose output is added to the REGULARIZATION_LOSSES collection.

Args: x: The input Tensor. num_output_units: The size of the output. activation_fn: activation function, default set to None to skip it and maintain a linear activation. weight_init: An optional weight initialization, defaults to xavier_initializer. bias_init: An initializer for the bias, defaults to 0. Set to None in order to disable bias. name: The name for this operation is used to name operations and to find variables. If specified it must be unique for this scope, otherwise a unique name starting with "fully_connected" will be created. See tf.variable_scope for details. weight_collections: List of graph collections to which weights are added. bias_collections: List of graph collections to which biases are added. output_collections: List of graph collections to which outputs are added. trainable: If True also add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES (see tf.Variable). weight_regularizer: A regularizer like the result of l1_regularizer or l2_regularizer. Used for weights. bias_regularizer: A regularizer like the result of l1_regularizer or l2_regularizer. Used for biases.

Returns: The output of the fully connected layer.

Raises: ValueError: if x has rank less than 2 or if its last dimension is not set.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def make_all(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.make_all(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.make_all. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.make_all(x1, *args, **kwargs)

is equivalent to

builder.make_all(*args, **kwargs)(x1)

tf.contrib.layers.make_all

Generates `__all__` from the docstring of one or more modules.

Usage: make_all(__name__) or make_all(__name__, [sys.modules(__name__), other_module]). The doc string modules must each have a docstring, and __all__ will contain all symbols with @@ references, where that symbol currently exists in the module named module_name.

Args: module_name: The name of the module (usually __name__). doc_string_modules: a list of modules from which to take docstring. If None, then a list containing only the module named module_name is used.

Returns: A list suitable for use as __all__.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def make_place_holder_tensors_for_base_features(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.make_place_holder_tensors_for_base_features(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.make_place_holder_tensors_for_base_features. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.make_place_holder_tensors_for_base_features(x1, *args, **kwargs)

is equivalent to

builder.make_place_holder_tensors_for_base_features(*args, **kwargs)(x1)

tf.contrib.layers.make_place_holder_tensors_for_base_features

Returns placeholder tensors for inference.

Args: feature_columns: An iterable containing all the feature columns. All items should be instances of classes derived from _FeatureColumn.

Returns: A dict mapping feature keys to SparseTensors (sparse columns) or placeholder Tensors (dense columns).

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def max_pool2d(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.max_pool2d(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.max_pool2d. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.max_pool2d(x1, *args, **kwargs)

is equivalent to

builder.max_pool2d(*args, **kwargs)(x1)

tf.contrib.layers.max_pool2d

Adds a 2D Max Pooling op.

It is assumed that the pooling is done per image but not in batch or channels.

Args: inputs: A Tensor of size [batch_size, height, width, channels]. kernel_size: A list of length 2: [kernel_height, kernel_width] of the pooling kernel over which the op is computed. Can be an int if both values are the same. stride: A list of length 2: [stride_height, stride_width]. Can be an int if both strides are the same. Note that presently both strides must have the same value. padding: The padding method, either 'VALID' or 'SAME'. outputs_collections: The collections to which the outputs are added. scope: Optional scope for name_scope.

Returns: A Tensor representing the results of the pooling operation.

Raises: ValueError: If 'kernel_size' is not a 2-D list

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)
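
A minimal sketch of the builder form (kernel size and stride are arbitrary values; same assumptions as the earlier examples):

```python
import tensorflow as tf
from tensorbuilder import TensorBuilder

builder = TensorBuilder()
x = tf.placeholder(tf.float32, shape=[None, 28, 28, 16])

pooled = builder.max_pool2d([2, 2], stride=2)(x)  # halves height and width
```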

def multi_class_target(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.multi_class_target(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.multi_class_target. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.multi_class_target(x1, *args, **kwargs)

is equivalent to

builder.multi_class_target(*args, **kwargs)(x1)

tf.contrib.layers.multi_class_target

Creates a _TargetColumn for multi class single label classification.

The target column uses softmax cross entropy loss.

Args: n_classes: Integer, number of classes, must be >= 2. label_name: String, name of the key in label dict. Can be null if label is a tensor (single headed models). weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example.

Returns: An instance of _MultiClassTargetColumn.

Raises: ValueError: if n_classes is < 2

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def one_hot_column(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.one_hot_column(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.one_hot_column. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.one_hot_column(x1, *args, **kwargs)

is equivalent to

builder.one_hot_column(*args, **kwargs)(x1)

tf.contrib.layers.one_hot_column

Creates a _OneHotColumn.

Args: sparse_id_column: A _SparseColumn which is created by sparse_column_with_* or crossed_column functions. Note that combiner defined in sparse_id_column is ignored.

Returns: An _OneHotColumn.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def one_hot_encoding(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.one_hot_encoding(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.one_hot_encoding. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.one_hot_encoding(x1, *args, **kwargs)

is equivalent to

builder.one_hot_encoding(*args, **kwargs)(x1)

tf.contrib.layers.one_hot_encoding

Transform numeric labels into onehot_labels using tf.one_hot.

Args: labels: [batch_size] target labels. num_classes: total number of classes. on_value: A scalar defining the on-value. off_value: A scalar defining the off-value. outputs_collections: collection to add the outputs. scope: Optional scope for name_scope.

Returns: one hot encoding of the labels.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)
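
A minimal sketch of the builder form (label values and class count are made up):

```python
import tensorflow as tf
from tensorbuilder import TensorBuilder

builder = TensorBuilder()
labels = tf.constant([0, 2, 1])

onehot = builder.one_hot_encoding(num_classes=3)(labels)
# same as tf.contrib.layers.one_hot_encoding(labels, num_classes=3)
```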

def optimize_loss(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.optimize_loss(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.optimize_loss. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.optimize_loss(x1, *args, **kwargs)

is equivalent to

builder.optimize_loss(*args, **kwargs)(x1)

tf.contrib.layers.optimize_loss

Given loss and parameters for optimizer, returns a training op.

Various ways of passing optimizers include:

- string, name of the optimizer like 'SGD', 'Adam'; see OPTIMIZER_CLS_NAMES for the full list. E.g. optimize_loss(..., optimizer='Adam').
- function, takes a learning rate Tensor as argument and must return an Optimizer instance. E.g. optimize_loss(..., optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5)). Alternatively, if learning_rate is None, the function takes no arguments. E.g. optimize_loss(..., learning_rate=None, optimizer=lambda: tf.train.MomentumOptimizer(0.5, momentum=0.5)).
- class, a subclass of Optimizer that takes only one required argument, the learning rate, such as AdamOptimizer or AdagradOptimizer. E.g. optimize_loss(..., optimizer=tf.train.AdagradOptimizer).
- object, an instance of a subclass of Optimizer. E.g. optimize_loss(..., optimizer=tf.train.AdagradOptimizer(0.5)).

Args: loss: Tensor, 0 dimensional. global_step: Tensor, step counter for each update. learning_rate: float or Tensor, magnitude of update per each training step. optimizer: string, class or optimizer instance, used as trainer. A string should be the name of an optimizer, like 'SGD', 'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant. A class should be a sub-class of tf.Optimizer that implements compute_gradients and apply_gradients functions. An optimizer instance should be an instantiation of a tf.Optimizer sub-class and have compute_gradients and apply_gradients functions. gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this value. gradient_multipliers: dict of variables or variable names to floats. If present, gradients for specified variables will be multiplied by given constant. clip_gradients: float or None, clips gradients by this value. learning_rate_decay_fn: function, takes learning_rate and global_step Tensors, returns Tensor. Can be used to implement any learning rate decay functions. For example: tf.train.exponential_decay. update_ops: list of update Operations to execute at each step. If None, uses elements of UPDATE_OPS collection. The order of execution between update_ops and loss is non-deterministic. variables: list of variables to optimize or None to use all trainable variables. name: The name for this operation is used to scope operations and summaries. summaries: List of internal quantities to visualize on tensorboard. If not set only the loss and the learning rate will be reported. The complete list is in OPTIMIZER_SUMMARIES.

Returns: Training op.

Raises: ValueError: if optimizer is wrong type.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)
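
A minimal sketch of direct usage with a tiny linear model (the learning rate and the choice of 'Adam' are arbitrary; the docstring's string form of optimizer is used):

```python
import tensorflow as tf
from tensorflow.contrib import layers

x = tf.placeholder(tf.float32, shape=[None, 10])
y = tf.placeholder(tf.float32, shape=[None, 1])

pred = layers.fully_connected(x, 1, activation_fn=None)
loss = tf.reduce_mean(tf.square(pred - y))  # 0-dimensional loss tensor

global_step = tf.Variable(0, trainable=False, name="global_step")
train_op = layers.optimize_loss(loss, global_step=global_step,
                                learning_rate=0.01, optimizer='Adam')
```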

def parse_feature_columns_from_examples(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.parse_feature_columns_from_examples(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.parse_feature_columns_from_examples. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.parse_feature_columns_from_examples(x1, *args, **kwargs)

is equivalent to

builder.parse_feature_columns_from_examples(*args, **kwargs)(x1)

tf.contrib.layers.parse_feature_columns_from_examples

Parses tf.Examples to extract tensors for given feature_columns.

This is a wrapper of 'tf.parse_example'. A typical usage is as follows:

```python
columns_to_tensor = parse_feature_columns_from_examples(
    serialized=my_data,
    feature_columns=my_features)
```

Where my_features are:

```python
# Define features and transformations
country = sparse_column_with_keys(column_name="native_country",
                                  keys=["US", "BRA", ...])
country_emb = embedding_column(sparse_id_column=country,
                               dimension=3, combiner="sum")
occupation = sparse_column_with_hash_bucket(column_name="occupation",
                                            hash_bucket_size=1000)
occupation_emb = embedding_column(sparse_id_column=occupation,
                                  dimension=16, combiner="sum")
occupation_x_country = crossed_column(columns=[occupation, country],
                                      hash_bucket_size=10000)
age = real_valued_column("age")
age_buckets = bucketized_column(
    source_column=age,
    boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])

my_features = [occupation_emb, age_buckets, country_emb]
```

Args: serialized: A vector (1-D Tensor) of strings, a batch of binary serialized Example protos. feature_columns: An iterable containing all the feature columns. All items should be instances of classes derived from _FeatureColumn. name: A name for this operation (optional). example_names: A vector (1-D Tensor) of strings (optional), the names of the serialized protos in the batch.

Returns: A dict mapping FeatureColumn to Tensor and SparseTensor values.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def real_valued_column(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.real_valued_column(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.real_valued_column. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.real_valued_column(x1, *args, **kwargs)

is equivalent to

builder.real_valued_column(*args, **kwargs)(x1)

tf.contrib.layers.real_valued_column

Creates a _RealValuedColumn.

Args: column_name: A string defining real valued column name. dimension: An integer specifying dimension of the real valued column. The default is 1. The Tensor representing the _RealValuedColumn will have the shape of [batch_size, dimension]. default_value: A single value compatible with dtype or a list of values compatible with dtype which the column takes on during tf.Example parsing if data is missing. If None, then tf.parse_example will fail if an example does not contain this column. If a single value is provided, the same value will be applied as the default value for every dimension. If a list of values is provided, the length of the list should be equal to the value of dimension. dtype: defines the type of values. Default value is tf.float32. normalizer: If not None, a function that can be used to normalize the value of the real valued column after default_value is applied for parsing. The normalizer function takes the input tensor as its argument, and returns the output tensor. (e.g. lambda x: (x - 3.0) / 4.2).

Returns: A _RealValuedColumn.

Raises: TypeError: if dimension is not an int. ValueError: if dimension is not a positive integer. TypeError: if default_value is a list but its length is not equal to the value of dimension. TypeError: if default_value is not compatible with dtype. ValueError: if dtype is not convertible to tf.float32.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def regression_target(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.regression_target(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.regression_target. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.regression_target(x1, *args, **kwargs)

is equivalent to

builder.regression_target(*args, **kwargs)(x1)

tf.contrib.layers.regression_target

Creates a _TargetColumn for linear regression.

Args: label_name: String, name of the key in label dict. Can be null if label is a tensor (single headed models). weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. target_dimension: dimension of the target for multilabels.

Returns: An instance of _TargetColumn

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def repeat(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.repeat(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.repeat. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.repeat(x1, *args, **kwargs)

is equivalent to

builder.repeat(*args, **kwargs)(x1)

tf.contrib.layers.repeat

Applies the same layer with the same arguments repeatedly.

```python
y = repeat(x, 3, conv2d, 64, [3, 3], scope='conv1')
# It is equivalent to:

x = conv2d(x, 64, [3, 3], scope='conv1/conv1_1')
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_2')
y = conv2d(x, 64, [3, 3], scope='conv1/conv1_3')
```

If the scope argument is not given in kwargs, it is set to layer.__name__, or layer.func.__name__ (for functools.partial objects). If neither __name__ nor func.__name__ is available, the layers are called with scope='stack'.

Args: inputs: A Tensor suitable for layer. repetitions: Int, number of repetitions. layer: A layer with arguments (inputs, *args, **kwargs). *args: Extra args for the layer. **kwargs: Extra kwargs for the layer.

Returns: a tensor resulting from applying the layer repetitions times.

Raises: ValueError: if the op is unknown or wrong.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)
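
A sketch of the docstring's own example in the builder form (assuming conv2d refers to tf.contrib.layers.conv2d and the same builder setup as the earlier examples):

```python
import tensorflow as tf
from tensorflow.contrib import layers
from tensorbuilder import TensorBuilder

builder = TensorBuilder()
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])

# Three stacked 3x3 convolutions under the 'conv1' scope.
y = builder.repeat(3, layers.conv2d, 64, [3, 3], scope='conv1')(x)
```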

def safe_embedding_lookup_sparse(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.safe_embedding_lookup_sparse(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.safe_embedding_lookup_sparse. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.safe_embedding_lookup_sparse(x1, *args, **kwargs)

is equivalent to

builder.safe_embedding_lookup_sparse(*args, **kwargs)(x1)

tf.contrib.layers.safe_embedding_lookup_sparse

Lookup embedding results, accounting for invalid IDs and empty features.

The partitioned embedding in embedding_weights must all be the same shape except for the first dimension. The first dimension is allowed to vary as the vocabulary size is not necessarily a multiple of P.

Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs with non-positive weight. For an entry with no features, the embedding vector for default_id is returned, or the 0-vector if default_id is not supplied.

The ids and weights may be multi-dimensional. Embeddings are always aggregated along the last dimension.

Args: embedding_weights: A list of P float tensors or values representing partitioned embedding tensors. The total unpartitioned shape should be [e_0, e_1, ..., e_m], where e_0 represents the vocab size and e_1, ..., e_m are the embedding dimensions. sparse_ids: SparseTensor of shape [d_0, d_1, ..., d_n] containing the ids. d_0 is typically batch size. sparse_weights: SparseTensor of same shape as sparse_ids, containing float weights corresponding to sparse_ids, or None if all weights are assumed to be 1.0. combiner: A string specifying how to combine embedding results for each entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the default. default_id: The id to use for an entry with no features. name: A name for this operation (optional). partition_strategy: A string specifying the partitioning strategy. Currently "div" and "mod" are supported. Default is "div".

Returns: Dense tensor of shape [d_0, d_1, ..., d_{n-1}, e_1, ..., e_m].

Raises: ValueError: if embedding_weights is empty.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def separable_convolution2d(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.separable_convolution2d(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.separable_convolution2d. However, the 1st argument is omitted; a partial over the remaining arguments is returned, which expects the 1st argument, such that

tf.contrib.layers.separable_convolution2d(x1, *args, **kwargs)

is equivalent to

builder.separable_convolution2d(*args, **kwargs)(x1)

tf.contrib.layers.separable_convolution2d

Adds a depth-separable 2D convolution with optional batch_norm layer.

This op first performs a depthwise convolution that acts separately on channels, creating a variable called depthwise_weights. If num_outputs is not None, it adds a pointwise convolution that mixes channels, creating a variable called pointwise_weights. Then, if batch_norm_params is None, it adds bias to the result, creating a variable called 'biases', otherwise it adds a batch normalization layer. It finally applies an activation function to produce the end result.

Args:
* inputs: a tensor of size [batch_size, height, width, channels].
* num_outputs: the number of pointwise convolution output filters. If None, the pointwise convolution stage is skipped.
* kernel_size: a list of length 2: [kernel_height, kernel_width] of the filters. Can be an int if both values are the same.
* depth_multiplier: the number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to num_filters_in * depth_multiplier.
* stride: a list of length 2: [stride_height, stride_width], specifying the depthwise convolution stride. Can be an int if both strides are the same.
* padding: one of 'VALID' or 'SAME'.
* activation_fn: activation function; set to None to skip it and maintain a linear activation.
* normalizer_fn: normalization function to use instead of biases. If normalizer_fn is provided then biases_initializer and biases_regularizer are ignored and biases are neither created nor added. Default is None (no normalizer function).
* normalizer_params: normalization function parameters.
* weights_initializer: An initializer for the weights.
* weights_regularizer: Optional regularizer for the weights.
* biases_initializer: An initializer for the biases. If None, biases are skipped.
* biases_regularizer: Optional regularizer for the biases.
* reuse: whether or not the layer and its variables should be reused. To be able to reuse the layer, scope must be given.
* variables_collections: optional list of collections for all the variables, or a dictionary containing a different list of collections per variable.
* outputs_collections: collection to add the outputs to.
* trainable: whether or not the variables should be trainable.
* scope: Optional scope for variable_scope.

Returns: A Tensor representing the output of the operation.
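A hedged sketch of the channel bookkeeping described above; the input shape and parameter values are illustrative:

```python
import tensorflow as tf

# A batch of 32x32 RGB images (shape is illustrative).
images = tf.placeholder(tf.float32, [None, 32, 32, 3])

# Depthwise stage: 3 input channels * depth_multiplier 2 = 6 channels,
# then the pointwise stage mixes them down to num_outputs = 16.
net = tf.contrib.layers.separable_convolution2d(
    images, num_outputs=16, kernel_size=[3, 3], depth_multiplier=2)

# With num_outputs=None the pointwise stage is skipped: 6 output channels.
depthwise_only = tf.contrib.layers.separable_convolution2d(
    images, num_outputs=None, kernel_size=[3, 3], depth_multiplier=2)
```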

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def shared_embedding_columns(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.shared_embedding_columns(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.shared_embedding_columns. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.shared_embedding_columns(x1, *args, **kwargs)

is equivalent to

builder.shared_embedding_columns(*args, **kwargs)(x1)

tf.contrib.layers.shared_embedding_columns

Creates a list of `_EmbeddingColumn` sharing the same embedding.

Args:
* sparse_id_columns: An iterable of _SparseColumn, such as those created by sparse_column_with_* or crossed_column functions. Note that the combiner defined in each sparse_id_column is ignored.
* dimension: An integer specifying the dimension of the embedding.
* combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each of these can be considered an example-level normalization on the column:
  * "sum": do not normalize
  * "mean": do l1 normalization
  * "sqrtn": do l2 normalization
  For more information: tf.embedding_lookup_sparse.
* shared_embedding_name: (Optional). A string specifying the name of the shared embedding weights. This will be needed if you want to reference the shared embedding separately from the generated _EmbeddingColumn.
* initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to tf.truncated_normal_initializer with mean 0.0 and standard deviation 1/sqrt(sparse_id_columns[0].length).
* ckpt_to_load_from: (Optional). String representing the checkpoint name/pattern from which to restore the column weights. Required if tensor_name_in_ckpt is not None.
* tensor_name_in_ckpt: (Optional). Name of the Tensor in the provided checkpoint from which to restore the column weights. Required if ckpt_to_load_from is not None.

Returns: A tuple of _EmbeddingColumn with shared embedding space.

Raises:
* ValueError: if sparse_id_columns is empty, or its elements are not compatible with each other.
* TypeError: if at least one element of sparse_id_columns is not a SparseTensor.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def softmax(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.softmax(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.softmax. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.softmax(x1, *args, **kwargs)

is equivalent to

builder.softmax(*args, **kwargs)(x1)

tf.contrib.layers.softmax

Performs softmax on Nth dimension of N-dimensional logit tensor.

For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension needs to have a specified number of elements (number of classes).

Args:
* logits: N-dimensional Tensor with logits, where N > 1.
* scope: Optional scope for variable_scope.

Returns: a Tensor with the same shape and type as logits.
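A minimal sketch of the piped form, assuming `builder` is a TensorBuilder instance (construction not shown):

```python
import tensorflow as tf

logits = tf.placeholder(tf.float32, [None, 10])  # batch of 10-class logits

# Direct call and piped form are equivalent per the contract above.
probs_direct = tf.contrib.layers.softmax(logits)
probs_piped = builder.softmax()(logits)  # `builder` assumed to exist
```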

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def sparse_column_with_hash_bucket(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.sparse_column_with_hash_bucket(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.sparse_column_with_hash_bucket. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.sparse_column_with_hash_bucket(x1, *args, **kwargs)

is equivalent to

builder.sparse_column_with_hash_bucket(*args, **kwargs)(x1)

tf.contrib.layers.sparse_column_with_hash_bucket

Creates a _SparseColumn with hashed bucket configuration.

Use this when your sparse features are in string or integer format, but you don't have a vocab file that maps each value to an integer ID.

```
output_id = Hash(input_feature_string) % bucket_size
```

Args:
* column_name: A string defining the sparse column name.
* hash_bucket_size: An int that is > 1. The number of buckets.
* combiner: A string specifying how to reduce if the sparse column is multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum" the default:
  * "sum": do not normalize features in the column
  * "mean": do l1 normalization on features in the column
  * "sqrtn": do l2 normalization on features in the column
  For more information: tf.embedding_lookup_sparse.
* dtype: The type of features. Only string and integer types are supported.

Returns: A _SparseColumn with hashed bucket configuration.

Raises:
* ValueError: hash_bucket_size is less than 2.
* ValueError: dtype is neither string nor integer.
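The bucketing rule can be illustrated in plain Python; note that the hash below is only illustrative, not the fingerprint TensorFlow applies internally:

```python
# Plain-Python illustration of:
#   output_id = Hash(input_feature_string) % bucket_size
def bucket_id(feature_string, hash_bucket_size):
    return hash(feature_string) % hash_bucket_size  # illustrative hash only

print(bucket_id("engineer", 1000))  # some id in [0, 1000)
```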

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def sparse_column_with_integerized_feature(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.sparse_column_with_integerized_feature(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.sparse_column_with_integerized_feature. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.sparse_column_with_integerized_feature(x1, *args, **kwargs)

is equivalent to

builder.sparse_column_with_integerized_feature(*args, **kwargs)(x1)

tf.contrib.layers.sparse_column_with_integerized_feature

Creates an integerized _SparseColumn.

Use this when your features are already pre-integerized into int64 IDs.

```
output_id = input_feature
```

Args:
* column_name: A string defining the sparse column name.
* bucket_size: An int that is > 1. The number of buckets. It should be greater than the maximum feature value; in other words, features in this column should be int64s in the range [0, bucket_size).
* combiner: A string specifying how to reduce if the sparse column is multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum" the default:
  * "sum": do not normalize features in the column
  * "mean": do l1 normalization on features in the column
  * "sqrtn": do l2 normalization on features in the column
  For more information: tf.embedding_lookup_sparse.
* dtype: Type of features. It should be an integer type. Default value is dtypes.int64.

Returns: An integerized _SparseColumn definition.

Raises:
* ValueError: bucket_size is not greater than 1.
* ValueError: dtype is not integer.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def sparse_column_with_keys(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.sparse_column_with_keys(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.sparse_column_with_keys. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.sparse_column_with_keys(x1, *args, **kwargs)

is equivalent to

builder.sparse_column_with_keys(*args, **kwargs)(x1)

tf.contrib.layers.sparse_column_with_keys

Creates a _SparseColumn with keys.

Lookup logic is as follows:

```
lookup_id = index_of_feature_in_keys if feature in keys else default_value
```

Args:
* column_name: A string defining the sparse column name.
* keys: A string list defining the vocabulary.
* default_value: The value to use for out-of-vocabulary feature values. Default is -1.
* combiner: A string specifying how to reduce if the sparse column is multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum" the default:
  * "sum": do not normalize features in the column
  * "mean": do l1 normalization on features in the column
  * "sqrtn": do l2 normalization on features in the column
  For more information: tf.embedding_lookup_sparse.

Returns: A _SparseColumnKeys with keys configuration.
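The lookup rule is simple enough to restate as runnable Python:

```python
# Plain-Python illustration of:
#   lookup_id = index_of_feature_in_keys if feature in keys else default_value
def lookup_id(feature, keys, default_value=-1):
    return keys.index(feature) if feature in keys else default_value

keys = ["red", "green", "blue"]
assert lookup_id("green", keys) == 1     # in vocabulary
assert lookup_id("magenta", keys) == -1  # out of vocabulary
```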

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def sparse_feature_cross(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.sparse_feature_cross(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.sparse_feature_cross. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.sparse_feature_cross(x1, *args, **kwargs)

is equivalent to

builder.sparse_feature_cross(*args, **kwargs)(x1)

tf.contrib.layers.sparse_feature_cross

Crosses a list of Tensor or SparseTensor objects.

See sparse_feature_cross_kernel.cc for more details.

Args:
* inputs: List of SparseTensor or Tensor to be crossed.
* hashed_output: If true, returns the hash of the cross instead of the string. This allows us to avoid string manipulations.
* num_buckets: Used if hashed_output is true. output = hashed_value % num_buckets if num_buckets > 0 else hashed_value.
* name: A name prefix for the returned tensors (optional).

Returns: A SparseTensor with the crossed features. Return type is string if hashed_output=False, int64 otherwise.

Raises: TypeError: If the inputs aren't either SparseTensor or Tensor.
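A plain-Python sketch of the num_buckets rule quoted in the Args above:

```python
# Sketch of: output = hashed_value % num_buckets if num_buckets > 0
#            else hashed_value
def bucketed_cross_value(hashed_value, num_buckets):
    return hashed_value % num_buckets if num_buckets > 0 else hashed_value

assert bucketed_cross_value(123456789, 1000) == 789
assert bucketed_cross_value(123456789, 0) == 123456789  # no bucketing
```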

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def stack(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.stack(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.stack. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.stack(x1, *args, **kwargs)

is equivalent to

builder.stack(*args, **kwargs)(x1)

tf.contrib.layers.stack

Builds a stack of layers by applying layer repeatedly using stack_args.

stack allows you to repeatedly apply the same operation with different arguments stack_args[i]. For each application of the layer, stack creates a new scope appended with an increasing number. For example:

```python
y = stack(x, fully_connected, [32, 64, 128], scope='fc')

# It is equivalent to:

x = fully_connected(x, 32, scope='fc/fc_1')
x = fully_connected(x, 64, scope='fc/fc_2')
y = fully_connected(x, 128, scope='fc/fc_3')
```

If the scope argument is not given in kwargs, it is set to layer.__name__, or layer.func.__name__ (for functools.partial objects). If neither __name__ nor func.__name__ is available, the layers are called with scope='stack'.

Args:
* inputs: A Tensor suitable for layer.
* layer: A layer with arguments (inputs, *args, **kwargs).
* stack_args: A list/tuple of parameters for each call of layer.
* **kwargs: Extra kwargs for the layer.

Returns: a Tensor result of applying the stacked layers.

Raises: ValueError: if the op is unknown or wrong.
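The piped form of the same example might look like this (a sketch; `builder` is an assumed TensorBuilder instance):

```python
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected

x = tf.placeholder(tf.float32, [None, 8])

# Direct form, as in the docstring example above:
y = tf.contrib.layers.stack(x, fully_connected, [32, 64, 128], scope='fc')

# Piped form: x is supplied last, per the contract above.
y2 = builder.stack(fully_connected, [32, 64, 128], scope='fc')(x)
```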

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def sum_regularizer(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.sum_regularizer(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.sum_regularizer. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.sum_regularizer(x1, *args, **kwargs)

is equivalent to

builder.sum_regularizer(*args, **kwargs)(x1)

tf.contrib.layers.sum_regularizer

Returns a function that applies the sum of multiple regularizers.

Args:
* regularizer_list: A list of regularizers to apply.
* scope: An optional scope name.

Returns: A function with signature sum_reg(weights) that applies the sum of all the input regularizers.
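A plain-Python sketch of the combinator's semantics (the library version additionally handles scoping and regularizers that return None):

```python
# Sketch: sum_reg(weights) applies every regularizer and sums the results.
def sum_regularizer_sketch(regularizer_list):
    def sum_reg(weights):
        return sum(r(weights) for r in regularizer_list)
    return sum_reg

l1 = lambda w: 0.01 * sum(abs(v) for v in w)
l2 = lambda w: 0.01 * sum(v * v for v in w)
reg = sum_regularizer_sketch([l1, l2])
print(reg([1.0, -2.0, 3.0]))  # l1 + l2 penalty on a toy weight vector
```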

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def summarize_activation(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.summarize_activation(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.summarize_activation. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.summarize_activation(x1, *args, **kwargs)

is equivalent to

builder.summarize_activation(*args, **kwargs)(x1)

tf.contrib.layers.summarize_activation

Summarize an activation.

This applies the given activation and adds useful summaries specific to the activation.

Args:
* op: The tensor to summarize (assumed to be a layer activation).

Returns: The summary op created to summarize op.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def summarize_activations(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.summarize_activations(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.summarize_activations. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.summarize_activations(x1, *args, **kwargs)

is equivalent to

builder.summarize_activations(*args, **kwargs)(x1)

tf.contrib.layers.summarize_activations

Summarize activations, using `summarize_activation` to summarize each one.
@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def summarize_collection(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.summarize_collection(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.summarize_collection. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.summarize_collection(x1, *args, **kwargs)

is equivalent to

builder.summarize_collection(*args, **kwargs)(x1)

tf.contrib.layers.summarize_collection

Summarize a graph collection of tensors, possibly filtered by name.
@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def summarize_tensor(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.summarize_tensor(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.summarize_tensor. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.summarize_tensor(x1, *args, **kwargs)

is equivalent to

builder.summarize_tensor(*args, **kwargs)(x1)

tf.contrib.layers.summarize_tensor

Summarize a tensor using a suitable summary type.

This function adds a summary op for tensor. The type of summary depends on the shape of tensor: for scalars a scalar_summary is created, and for all other tensors histogram_summary is used.

Args:
* tensor: The tensor to summarize.
* tag: The tag to use; if None, the tensor's op name is used.

Returns: The summary op created or None for string tensors.
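A hedged sketch of the shape-based dispatch, using the pre-1.0 summary ops the docstring refers to:

```python
import tensorflow as tf

# Sketch only: scalars get a scalar summary, everything else a histogram.
def summarize_tensor_sketch(tensor, tag=None):
    tag = tag or tensor.op.name
    if tensor.get_shape().ndims == 0:
        return tf.scalar_summary(tag, tensor)
    return tf.histogram_summary(tag, tensor)
```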

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def summarize_tensors(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.summarize_tensors(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.summarize_tensors. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.summarize_tensors(x1, *args, **kwargs)

is equivalent to

builder.summarize_tensors(*args, **kwargs)(x1)

tf.contrib.layers.summarize_tensors

Summarize a set of tensors.
@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def unit_norm(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.unit_norm(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.unit_norm. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.unit_norm(x1, *args, **kwargs)

is equivalent to

builder.unit_norm(*args, **kwargs)(x1)

tf.contrib.layers.unit_norm

Normalizes the given input across the specified dimension to unit length.

Note that the rank of input must be known.

Args:
* inputs: A Tensor of arbitrary size.
* dim: The dimension along which the input is normalized.
* epsilon: A small value to add to the inputs to avoid dividing by zero.
* scope: Optional scope for variable_scope.

Returns: The normalized Tensor.

Raises: ValueError: If dim is not smaller than the number of dimensions in 'inputs'.
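The normalization itself can be sketched directly (a simplification; the library op also validates the rank and handles epsilon as documented above):

```python
import tensorflow as tf

# Sketch: divide by the L2 norm along `dim`, guarding against zero norms.
def unit_norm_sketch(inputs, dim, epsilon=1e-7):
    norm = tf.sqrt(tf.reduce_sum(tf.square(inputs), dim, keep_dims=True))
    return inputs / tf.maximum(norm, epsilon)
```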

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def variance_scaling_initializer(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.variance_scaling_initializer(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.variance_scaling_initializer. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.variance_scaling_initializer(x1, *args, **kwargs)

is equivalent to

builder.variance_scaling_initializer(*args, **kwargs)(x1)

tf.contrib.layers.variance_scaling_initializer

Returns an initializer that generates tensors without scaling variance.

When initializing a deep network, it is in principle advantageous to keep the scale of the input variance constant, so it does not explode or diminish by reaching the final layer. This initializer uses the following formula:

```
if mode='FAN_IN':    # Count only number of input connections.
  n = fan_in
elif mode='FAN_OUT': # Count only number of output connections.
  n = fan_out
elif mode='FAN_AVG': # Average number of input and output connections.
  n = (fan_in + fan_out) / 2.0

truncated_normal(shape, 0.0, stddev=sqrt(factor / n))
```

To get http://arxiv.org/pdf/1502.01852v1.pdf (the default), use:
* factor=2.0, mode='FAN_IN', uniform=False

To get http://arxiv.org/abs/1408.5093 use:
* factor=1.0, mode='FAN_IN', uniform=True

To get http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf use:
* factor=1.0, mode='FAN_AVG', uniform=True

To get xavier_initializer use either:
* factor=1.0, mode='FAN_AVG', uniform=True
* factor=1.0, mode='FAN_AVG', uniform=False

Args:
* factor: Float. A multiplicative factor.
* mode: String. 'FAN_IN', 'FAN_OUT' or 'FAN_AVG'.
* uniform: Whether to use uniform or normally distributed random initialization.
* seed: A Python integer. Used to create random seeds. See set_random_seed for behavior.
* dtype: The data type. Only floating point types are supported.

Returns: An initializer that generates tensors with unit variance.

Raises:
* ValueError: if dtype is not a floating point type.
* TypeError: if mode is not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG'].
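The stddev arithmetic is easy to check in plain Python:

```python
import math

# Plain-Python restatement of the formula above.
def variance_scaling_stddev(fan_in, fan_out, factor=2.0, mode='FAN_IN'):
    n = {'FAN_IN': fan_in,
         'FAN_OUT': fan_out,
         'FAN_AVG': (fan_in + fan_out) / 2.0}[mode]
    return math.sqrt(factor / n)

print(variance_scaling_stddev(256, 128))                              # He et al. default
print(variance_scaling_stddev(256, 128, factor=1.0, mode='FAN_AVG'))  # Xavier-like
```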

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def weighted_sparse_column(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.weighted_sparse_column(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.weighted_sparse_column. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.weighted_sparse_column(x1, *args, **kwargs)

is equivalent to

builder.weighted_sparse_column(*args, **kwargs)(x1)

tf.contrib.layers.weighted_sparse_column

Creates a _SparseColumn by combining sparse_id_column with a weight column.

Args:
* sparse_id_column: A _SparseColumn which is created by sparse_column_with_* functions.
* weight_column_name: A string defining a sparse column name which represents the weight or value of the corresponding sparse id feature.
* dtype: Type of weights, such as tf.float32.

Returns: A _WeightedSparseColumn composed of two sparse features: one represents the id, the other represents the weight (value) of the id feature in that example.

Raises: ValueError: if dtype is not convertible to float.

An example usage:

```python
words = sparse_column_with_hash_bucket("words", 1000)
tfidf_weighted_words = weighted_sparse_column(words, "tfidf_score")
```

This configuration assumes that the input dictionary of the model contains the following two items:
* (key="words", value=word_tensor) where word_tensor is a SparseTensor.
* (key="tfidf_score", value=tfidf_score_tensor) where tfidf_score_tensor is a SparseTensor.

The following are assumed to be true:
* word_tensor.indices = tfidf_score_tensor.indices
* word_tensor.shape = tfidf_score_tensor.shape

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def weighted_sum_from_feature_columns(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.weighted_sum_from_feature_columns(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.weighted_sum_from_feature_columns. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.weighted_sum_from_feature_columns(x1, *args, **kwargs)

is equivalent to

builder.weighted_sum_from_feature_columns(*args, **kwargs)(x1)

tf.contrib.layers.weighted_sum_from_feature_columns

A tf.contrib.layers-style linear prediction builder based on FeatureColumns.

Generally a single example in training data is described with feature columns. This function generates a weighted sum for each of the num_outputs outputs. The weighted sum refers to logits in classification problems; for linear regression problems it refers to the prediction itself.

An example usage of weighted_sum_from_feature_columns is as follows:

```python
# Building model for training
columns_to_tensor = tf.parse_example(...)
logits = weighted_sum_from_feature_columns(
    columns_to_tensors=columns_to_tensor,
    feature_columns=feature_columns,
    num_outputs=1)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, labels)
```

where feature_columns can be defined as follows:

```python
occupation = sparse_column_with_hash_bucket(column_name="occupation",
                                            hash_bucket_size=1000)
occupation_emb = embedding_column(sparse_id_column=occupation,
                                  dimension=16, combiner="sum")
age = real_valued_column("age")
age_buckets = bucketized_column(
    source_column=age,
    boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
occupation_x_age = crossed_column(columns=[occupation, age_buckets],
                                  hash_bucket_size=10000)

feature_columns = [occupation_emb, occupation_x_age]
```

Args:
* columns_to_tensors: A mapping from feature column to tensors. A 'string' key means a base feature (not transformed). It can have a FeatureColumn as a key too, which means that the FeatureColumn was already transformed by the input pipeline (for example, inflow may have handled transformations).
* feature_columns: A set containing all the feature columns. All items in the set should be instances of classes derived from FeatureColumn.
* num_outputs: An integer specifying the number of outputs. Default value is 1.
* weight_collections: List of graph collections to which weights are added.
* trainable: If True, also add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES (see tf.Variable).
* scope: Optional scope for variable_scope.

Returns: A tuple of the following:
* A Tensor which represents predictions of a linear model.
* A dictionary which maps feature_column to the corresponding Variable.
* A Variable which is used for bias.

Raises: ValueError: if FeatureColumn cannot be used for linear predictions.

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)

def xavier_initializer(

self, *args, **kwargs)

THIS METHOD IS AUTOMATICALLY GENERATED

builder.xavier_initializer(*args, **kwargs)

It accepts the same arguments as tf.contrib.layers.xavier_initializer. However, the 1st argument is omitted, and a partial with the rest of the arguments is returned which expects that 1st argument, such that

tf.contrib.layers.xavier_initializer(x1, *args, **kwargs)

is equivalent to

builder.xavier_initializer(*args, **kwargs)(x1)

tf.contrib.layers.xavier_initializer

Returns an initializer performing "Xavier" initialization for weights.

This function implements the weight initialization from:

Xavier Glorot and Yoshua Bengio (2010): Understanding the difficulty of training deep feedforward neural networks. International conference on artificial intelligence and statistics.

This initializer is designed to keep the scale of the gradients roughly the same in all layers. For a uniform distribution this ends up being the range x = sqrt(6. / (in + out)), with samples drawn from [-x, x]; for a normal distribution a standard deviation of sqrt(3. / (in + out)) is used.

Args:
* uniform: Whether to use uniform or normally distributed random initialization.
* seed: A Python integer. Used to create random seeds. See set_random_seed for behavior.
* dtype: The data type. Only floating point types are supported.

Returns: An initializer for a weight matrix.
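The bounds quoted above are a one-liner to verify in plain Python:

```python
import math

# Uniform case: samples are drawn from [-x, x].
def xavier_uniform_limit(fan_in, fan_out):
    return math.sqrt(6.0 / (fan_in + fan_out))

# Normal case: a standard deviation is used instead.
def xavier_normal_stddev(fan_in, fan_out):
    return math.sqrt(3.0 / (fan_in + fan_out))

print(xavier_uniform_limit(784, 256))
print(xavier_normal_stddev(784, 256))
```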

@functools.wraps(fn)
def method(self, *args, **kwargs):
    kwargs['_return_type'] = _return_type
    return self.Then(fn, *args, **kwargs)