diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml index 13e2532b464..3ecd648e60b 100644 --- a/tensorflow-core/tensorflow-core-api/pom.xml +++ b/tensorflow-core/tensorflow-core-api/pom.xml @@ -34,7 +34,16 @@ junit junit - 4.12 + test + + + org.openjdk.jmh + jmh-core + test + + + org.openjdk.jmh + jmh-generator-annprocess test diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 6cf6d4fe232..3bf50107a61 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -252,12 +252,20 @@ import org.tensorflow.op.core.Zeros; import org.tensorflow.op.core.ZerosLike; import org.tensorflow.tools.Shape; +import org.tensorflow.tools.ndarray.BooleanNdArray; +import org.tensorflow.tools.ndarray.ByteNdArray; +import org.tensorflow.tools.ndarray.DoubleNdArray; +import org.tensorflow.tools.ndarray.FloatNdArray; +import org.tensorflow.tools.ndarray.IntNdArray; +import org.tensorflow.tools.ndarray.LongNdArray; +import org.tensorflow.tools.ndarray.NdArray; import org.tensorflow.types.TBool; import org.tensorflow.types.TFloat32; import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; +import org.tensorflow.types.TUint8; import org.tensorflow.types.family.TNumber; import org.tensorflow.types.family.TType; @@ -269,27 +277,27 @@ *

Example usage: *

{@code
  * try (Graph g = new Graph()) {
- *   Ops ops = Ops.create(g);
+ *   Ops tf = Ops.create(g);
  *   // Operations are typed classes with convenience
  *   // builders in Ops.
- *   Constant three = ops.constant(3);
+ *   Constant three = tf.val(3);
  *   // Single-result operations implement the Operand
  *   // interface, so this works too.
- *   Operand four = ops.constant(4);
+ *   Operand four = tf.val(4);
  *   // Most builders are found within a group, and accept
  *   // Operand types as operands
- *   Operand nine = ops.math.add(four, ops.constant(5));
+ *   Operand nine = tf.math.add(four, tf.val(5));
  *   // Multi-result operations however offer methods to
  *   // select a particular result for use.
- *   Operand result = 
- *       ops.math.add(ops.unique(s, a).y(), b);
+ *   Operand result = 
+ *       tf.math.add(tf.unique(s, a).y(), b);
  *   // Optional attributes
- *   ops.linalg.matMul(a, b, MatMul.transposeA(true));
+ *   tf.linalg.matMul(a, b, MatMul.transposeA(true));
  *   // Naming operators
- *   ops.withName("foo").constant(5); // name "foo"
+ *   tf.withName("foo").val(5); // name "foo"
  *   // Names can exist in a hierarchy
- *   Ops sub = ops.withSubScope("sub");
- *   sub.withName("bar").constant(4); // "sub/bar"
+ *   Ops sub = tf.withSubScope("sub");
+ *   sub.withName("bar").val(4); // "sub/bar"
  * }
  * }
*/ @@ -401,6 +409,96 @@ public Any any(Operand input, Operand axis, return Any.create(scope, input, axis, options); } + /** + * Creates a constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a float constant + */ + public Constant array(int... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code String} elements, using the default UTF-8 charset. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return the {@code String} constant + */ + public Constant array(String... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code boolean} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a boolean constant + */ + public Constant array(boolean... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code long} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a long constant + */ + public Constant array(long... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code float} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a float constant + */ + public Constant array(float... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code double} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. 
+ * @return a double constant + */ + public Constant array(double... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code byte} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a byte constant + */ + public Constant array(byte... data) { + return Constant.arrayOf(scope, data); + } + + /** + * Creates a constant of {@code String} elements, using the given charset. + * + * @param scope is a scope used to add the underlying operation. + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. + * @return the {@code String} constant + */ + public Constant array(Charset charset, String... data) { + return Constant.arrayOf(scope, charset, data); + } + /** * Asserts that the given condition is true. *

@@ -981,6059 +1079,6233 @@ public Concat concat(Iterable } /** - * Creates a constant containing a single {@code int} element. + * Create a constant from a Java object. * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return an integer constant - */ - public Constant constant(int data) { - return Constant.create(scope, data); - } - - /** - * Creates a rank-3 constant of {@code int} elements. + *

The argument {@code object} is first converted into a Tensor using {@link + * org.tensorflow.Tensor#create(Object)}, so only Objects supported by this method must be + * provided. For example: * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - */ - public Constant constant(int[][][] data) { - return Constant.create(scope, data); - } - - /** - * Creates a rank-4 constant of {@code String} elements, each represented as an array of {@code byte}s. + *

{@code
+   *  Constant.create(scope, new int[][] {{1, 2}, {3, 4}}, TInt32.DTYPE); // returns a 2x2 integer matrix
+   *  }
* * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param object a Java object representing the constant. + * @return a constant of type `type` + * @see org.tensorflow.Tensor#create(Object) Tensor.create + * @deprecated use {@link Ops#val(Tensor)} instead */ - public Constant constant(byte[][][][][] data) { - return Constant.create(scope, data); + @Deprecated + public Constant constant(Object object, DataType type) { + return Constant.create(scope, object, type); } /** - * Creates a rank-5 constant of {@code long} elements. + * Create a {@link TInt32} constant with data from the given buffer. + * + *

Creates a constant with the given shape by copying elements from the buffer (starting from + * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents + * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this + * method. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return an integer constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TInt32>)} instead */ - public Constant constant(long[][][][][] data) { - return Constant.create(scope, data); + @Deprecated + public Constant constant(long[] shape, IntBuffer data) { + return Constant.create(scope, shape, data); } /** - * Creates a constant containing a single {@code boolean} element. + * Create a {@link TInt64} constant with data from the given buffer. + * + *

Creates a constant with the given shape by copying elements from the buffer (starting from + * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents + * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this + * method. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a boolean constant + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a long constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TInt64>)} instead */ - public Constant constant(boolean data) { - return Constant.create(scope, data); + @Deprecated + public Constant constant(long[] shape, LongBuffer data) { + return Constant.create(scope, shape, data); } /** - * Creates a rank-4 constant of {@code int} elements. + * Create a {@link TFloat64} constant with data from the given buffer. + * + *

Creates a constant with the given shape by copying elements from the buffer (starting from + * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents + * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this + * method. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a double constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TFloat64>)} instead */ - public Constant constant(int[][][][] data) { - return Constant.create(scope, data); + @Deprecated + public Constant constant(long[] shape, DoubleBuffer data) { + return Constant.create(scope, shape, data); } /** - * Creates a rank-3 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Create a {@link TFloat32} constant with data from the given buffer. + * + *

Creates a constant with the given shape by copying elements from the buffer (starting from + * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents + * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this + * method. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a float constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TFloat32>)} instead */ - public Constant constant(byte[][][][] data) { - return Constant.create(scope, data); + @Deprecated + public Constant constant(long[] shape, FloatBuffer data) { + return Constant.create(scope, shape, data); } /** - * Creates a rank-3 constant of {@code long} elements. + * Create a constant with data from the given buffer. + * + *

Creates a Constant with the provided shape of any type where the constant data has been + * encoded into {@code data} as per the specification of the TensorFlow C + * API. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param type the tensor datatype. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a constant of type `type` + * @throws IllegalArgumentException If the tensor datatype or shape is not compatible with the + * buffer + * @deprecated use {@link Ops#val(Tensor)} instead */ - public Constant constant(long[][][] data) { - return Constant.create(scope, data); + @Deprecated + public Constant constant(DataType type, long[] shape, ByteBuffer data) { + return Constant.create(scope, type, shape, data); } /** - * Creates a rank-1 constant of {@code String} elements, each represented as an array of {@code byte}s. + * This op consumes a lock created by `MutexLock`. + *

+ * This op exists to consume a tensor created by `MutexLock` (other than + * direct control dependencies). It should be the only that consumes the tensor, + * and will raise an error if it is not. Its only purpose is to keep the + * mutex lock tensor alive until it is consumed by this op. + *

+ * NOTE: This operation must run on the same device as its input. This may + * be enforced via the `colocate_with` mechanism. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param mutexLock A tensor returned by `MutexLock`. + * @return a new instance of ConsumeMutexLock */ - public Constant constant(byte[][] data) { - return Constant.create(scope, data); + public ConsumeMutexLock consumeMutexLock(Operand mutexLock) { + return ConsumeMutexLock.create(scope, mutexLock); } /** - * Creates a rank-3 constant of {@code double} elements. + * Does nothing. Serves as a control trigger for scheduling. + *

+ * Only useful as a placeholder for control edges. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @return a new instance of ControlTrigger */ - public Constant constant(double[][][] data) { - return Constant.create(scope, data); + public ControlTrigger controlTrigger() { + return ControlTrigger.create(scope); } /** - * Creates a rank-6 constant of {@code long} elements. + * Increments 'ref' until it reaches 'limit'. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code output()} output + * @param ref Should be from a scalar `Variable` node. + * @param limit If incrementing ref would bring it above limit, instead generates an + * 'OutOfRange' error. + * @return a new instance of CountUpTo */ - public Constant constant(long[][][][][][] data) { - return Constant.create(scope, data); + public CountUpTo countUpTo(Operand ref, Long limit) { + return CountUpTo.create(scope, ref, limit); } /** - * Creates a rank-5 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Makes a copy of `x`. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param data type for {@code y()} output + * @param x The source tensor of type `T`. + * @return a new instance of DeepCopy */ - public Constant constant(byte[][][][][][] data) { - return Constant.create(scope, data); + public DeepCopy deepCopy(Operand x) { + return DeepCopy.create(scope, x); } /** - * Creates a rank-4 constant of {@code float} elements. 
+ * Delete the tensor specified by its handle in the session. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param handle The handle for a tensor stored in the session state. + * @return a new instance of DeleteSessionTensor */ - public Constant constant(float[][][][] data) { - return Constant.create(scope, data); + public DeleteSessionTensor deleteSessionTensor(Operand handle) { + return DeleteSessionTensor.create(scope, handle); } /** - * Creates a rank-1 constant of {@code boolean} elements. + * Deletes the resource specified by the handle. + *

+ * All subsequent operations using the resource will result in a NotFound + * error status. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param resource handle to the resource to delete. + * @param options carries optional attributes values + * @return a new instance of DestroyResourceOp */ - public Constant constant(boolean[] data) { - return Constant.create(scope, data); + public DestroyResourceOp destroyResourceOp(Operand resource, + DestroyResourceOp.Options... options) { + return DestroyResourceOp.create(scope, resource, options); } /** - * Creates a rank-1 constant of {@code double} elements. + * Destroys the temporary variable and returns its final value. + *

+ * Sets output to the value of the Tensor pointed to by 'ref', then destroys + * the temporary variable called 'var_name'. + * All other uses of 'ref' must have executed before this op. + * This is typically achieved by chaining the ref through each assign op, or by + * using control dependencies. + *

+ * Outputs the final value of the tensor pointed to by 'ref'. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code value()} output + * @param ref A reference to the temporary variable tensor. + * @param varName Name of the temporary variable, usually the name of the matching + * 'TemporaryVariable' op. + * @return a new instance of DestroyTemporaryVariable */ - public Constant constant(double[] data) { - return Constant.create(scope, data); + public DestroyTemporaryVariable destroyTemporaryVariable(Operand ref, + String varName) { + return DestroyTemporaryVariable.create(scope, ref, varName); } /** - * Creates a rank-6 constant of {@code boolean} elements. - * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * Partitions `data` into `num_partitions` tensors using indices from `partitions`. + *

+ * For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` + * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i` + * are placed in `outputs[i]` in lexicographic order of `js`, and the first + * dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`. + * In detail, + *

{@code
+   *      outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
+   *
+   *      outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
+   *  }
+ * `data.shape` must start with `partitions.shape`. + *

+ * For example: + *

{@code
+   *      # Scalar partitions.
+   *      partitions = 1
+   *      num_partitions = 2
+   *      data = [10, 20]
+   *      outputs[0] = []  # Empty with shape [0, 2]
+   *      outputs[1] = [[10, 20]]
+   *
+   *      # Vector partitions.
+   *      partitions = [0, 0, 1, 1, 0]
+   *      num_partitions = 2
+   *      data = [10, 20, 30, 40, 50]
+   *      outputs[0] = [10, 20, 50]
+   *      outputs[1] = [30, 40]
+   *  }
+ * See `dynamic_stitch` for an example on how to merge partitions back. + *

+ *

+ * + *
+ * + * @param data type for {@code outputs()} output + * @param data + * @param partitions Any shape. Indices in the range `[0, num_partitions)`. + * @param numPartitions The number of partitions to output. + * @return a new instance of DynamicPartition */ - public Constant constant(boolean[][][][][][] data) { - return Constant.create(scope, data); + public DynamicPartition dynamicPartition(Operand data, + Operand partitions, Long numPartitions) { + return DynamicPartition.create(scope, data, partitions, numPartitions); } /** - * Creates a rank-4 constant of {@code boolean} elements. + * Interleave the values from the `data` tensors into a single tensor. + *

+ * Builds a merged tensor such that + *

{@code
+   *      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
+   *  }
+ * For example, if each `indices[m]` is scalar or vector, we have + *
{@code
+   *      # Scalar indices:
+   *      merged[indices[m], ...] = data[m][...]
    *
-   * @param scope is a scope used to add the underlying operation.
-   * @param data An array containing the values to put into the new constant. The dimensions of the
-   *      new constant will match those of the array.
+   *      # Vector indices:
+   *      merged[indices[m][i], ...] = data[m][i, ...]
+   *  }
+ * Each `data[i].shape` must start with the corresponding `indices[i].shape`, + * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we + * must have `data[i].shape = indices[i].shape + constant`. In terms of this + * `constant`, the output shape is + *

+ * merged.shape = [max(indices)] + constant + *

+ * Values are merged in order, so if an index appears in both `indices[m][i]` and + * `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the + * merged result. If you do not need this guarantee, ParallelDynamicStitch might + * perform better on some devices. + *

+ * For example: + *

{@code
+   *      indices[0] = 6
+   *      indices[1] = [4, 1]
+   *      indices[2] = [[5, 2], [0, 3]]
+   *      data[0] = [61, 62]
+   *      data[1] = [[41, 42], [11, 12]]
+   *      data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
+   *      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
+   *                [51, 52], [61, 62]]
+   *  }
+ * This method can be used to merge partitions created by `dynamic_partition` + * as illustrated on the following example: + *
{@code
+   *      # Apply function (increments x_i) on elements for which a certain condition
+   *      # apply (x_i != -1 in this example).
+   *      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
+   *      condition_mask=tf.not_equal(x,tf.constant(-1.))
+   *      partitioned_data = tf.dynamic_partition(
+   *          x, tf.cast(condition_mask, tf.int32) , 2)
+   *      partitioned_data[1] = partitioned_data[1] + 1.0
+   *      condition_indices = tf.dynamic_partition(
+   *          tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
+   *      x = tf.dynamic_stitch(condition_indices, partitioned_data)
+   *      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
+   *      # unchanged.
+   *  }
+ *
+ * + *
+ * + * @param data type for {@code merged()} output + * @param indices + * @param data + * @return a new instance of DynamicStitch */ - public Constant constant(boolean[][][][] data) { - return Constant.create(scope, data); + public DynamicStitch dynamicStitch(Iterable> indices, + Iterable> data) { + return DynamicStitch.create(scope, indices, data); } /** - * Creates a rank-6 constant of {@code float} elements. + * Computes the (possibly normalized) Levenshtein Edit Distance. + *

+ * The inputs are variable-length sequences provided by SparseTensors + * (hypothesis_indices, hypothesis_values, hypothesis_shape) + * and + * (truth_indices, truth_values, truth_shape). + *

+ * The inputs are: * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param hypothesisIndices The indices of the hypothesis list SparseTensor. + * This is an N x R int64 matrix. + * @param hypothesisValues The values of the hypothesis list SparseTensor. + * This is an N-length vector. + * @param hypothesisShape The shape of the hypothesis list SparseTensor. + * This is an R-length vector. + * @param truthIndices The indices of the truth list SparseTensor. + * This is an M x R int64 matrix. + * @param truthValues The values of the truth list SparseTensor. + * This is an M-length vector. + * @param truthShape truth indices, vector. + * @param options carries optional attributes values + * @return a new instance of EditDistance */ - public Constant constant(float[][][][][][] data) { - return Constant.create(scope, data); + public EditDistance editDistance(Operand hypothesisIndices, + Operand hypothesisValues, Operand hypothesisShape, Operand truthIndices, + Operand truthValues, Operand truthShape, EditDistance.Options... options) { + return EditDistance.create(scope, hypothesisIndices, hypothesisValues, hypothesisShape, truthIndices, truthValues, truthShape, options); } /** - * Creates a rank-2 constant of {@code long} elements. + * Creates a tensor with the given shape. + *

+ * This operation creates a tensor of `shape` and `dtype`. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code output()} output + * @param shape 1-D. Represents the shape of the output tensor. + * @param dtype + * @param options carries optional attributes values + * @return a new instance of Empty */ - public Constant constant(long[][] data) { - return Constant.create(scope, data); + public Empty empty(Operand shape, DataType dtype, + Empty.Options... options) { + return Empty.create(scope, shape, dtype, options); } /** - * Creates a rank-2 constant of {@code double} elements. + * Creates and returns an empty tensor list. + *

+ * All list elements must be tensors of dtype element_dtype and shape compatible + * with element_shape. + *

+ * handle: an empty tensor list. + * element_dtype: the type of elements in the list. + * element_shape: a shape compatible with that of elements in the list. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param elementShape + * @param maxNumElements + * @param elementDtype + * @return a new instance of EmptyTensorList */ - public Constant constant(double[][] data) { - return Constant.create(scope, data); + public EmptyTensorList emptyTensorList( + Operand elementShape, Operand maxNumElements, DataType elementDtype) { + return EmptyTensorList.create(scope, elementShape, maxNumElements, elementDtype); } /** - * Creates a rank-6 constant of {@code double} elements. + * Ensures that the tensor's shape matches the expected shape. + *

+ * Raises an error if the input tensor's shape does not match the specified shape. + * Returns the input tensor otherwise. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code output()} output + * @param input A tensor, whose shape is to be validated. + * @param shape The expected (possibly partially specified) shape of the input tensor. + * @return a new instance of EnsureShape */ - public Constant constant(double[][][][][][] data) { - return Constant.create(scope, data); + public EnsureShape ensureShape(Operand input, Shape shape) { + return EnsureShape.create(scope, input, shape); } /** - * Creates a constant containing a single {@code String} element, represented as an array of {@code byte}s. + * Inserts a dimension of 1 into a tensor's shape. + *

+ * Given a tensor `input`, this operation inserts a dimension of 1 at the + * dimension index `axis` of `input`'s shape. The dimension index `axis` starts at + * zero; if you specify a negative number for `axis` it is counted backward from + * the end. + *

+ * This operation is useful if you want to add a batch dimension to a single + * element. For example, if you have a single image of shape `[height, width, + * channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, + * which will make the shape `[1, height, width, channels]`. + *

+ * Other examples: + *

{@code
+   *  # 't' is a tensor of shape [2]
+   *  shape(expand_dims(t, 0)) ==> [1, 2]
+   *  shape(expand_dims(t, 1)) ==> [2, 1]
+   *  shape(expand_dims(t, -1)) ==> [2, 1]
    *
-   * @param scope is a scope used to add the underlying operation.
-   * @param data An array containing the values to put into the new constant. String elements are
-   *      sequences of bytes from the last array dimension.
+   *  # 't2' is a tensor of shape [2, 3, 5]
+   *  shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
+   *  shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
+   *  shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
+   *  }
+ * This operation requires that: + *

+ * `-1-input.dims() <= dim <= input.dims()` + *

+ * This operation is related to `squeeze()`, which removes dimensions of + * size 1. + * + * @param data type for {@code output()} output + * @param input + * @param axis 0-D (scalar). Specifies the dimension index at which to + * expand the shape of `input`. Must be in the range + * `[-rank(input) - 1, rank(input)]`. + * @return a new instance of ExpandDims */ - public Constant constant(byte[] data) { - return Constant.create(scope, data); + public ExpandDims expandDims(Operand input, + Operand axis) { + return ExpandDims.create(scope, input, axis); } /** - * Creates a rank-6 constant of {@code int} elements. + * Extract `patches` from `input` and put them in the "depth" output dimension. 3D extension of `extract_image_patches`. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code patches()} output + * @param input 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. + * @param ksizes The size of the sliding window for each dimension of `input`. + * @param strides 1-D of length 5. How far the centers of two consecutive patches are in + * `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. + * @param padding The type of padding algorithm to use. + *

+ * We specify the size-related attributes as: + *

{@code
+   *        ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
+   *        strides = [1, stride_planes, strides_rows, strides_cols, 1]
+   *  }
+ * @return a new instance of ExtractVolumePatches */ - public Constant constant(int[][][][][][] data) { - return Constant.create(scope, data); + public ExtractVolumePatches extractVolumePatches(Operand input, + List ksizes, List strides, String padding) { + return ExtractVolumePatches.create(scope, input, ksizes, strides, padding); } /** - * Creates a rank-5 constant of {@code boolean} elements. - * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * Creates a tensor filled with a scalar value. + *

+ * This operation creates a tensor of shape `dims` and fills it with `value`. + *

+ * For example: + *

{@code
+   *  # Output tensor has shape [2, 3].
+   *  fill([2, 3], 9) ==> [[9, 9, 9]
+   *                       [9, 9, 9]]
+   *  }
+ * `tf.fill` differs from `tf.constant` in a few ways: + *
    + *
  • + * `tf.fill` only supports scalar contents, whereas `tf.constant` supports + * Tensor values. + *
  • + *
  • + * `tf.fill` creates an Op in the computation graph that constructs the actual + * Tensor value at runtime. This is in contrast to `tf.constant` which embeds + * the entire Tensor into the graph with a `Const` node. + *
  • + *
  • + * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes + * based on other runtime Tensors, unlike `tf.constant`. + * + * @param data type for {@code output()} output + * @param dims 1-D. Represents the shape of the output tensor. + * @param value 0-D (scalar). Value to fill the returned tensor. + *

    + * @compatibility(numpy) Equivalent to np.full + * @end_compatibility + * @return a new instance of Fill */ - public Constant constant(boolean[][][][][] data) { - return Constant.create(scope, data); + public Fill fill(Operand dims, Operand value) { + return Fill.create(scope, dims, value); } /** - * Creates a rank-1 constant of {@code int} elements. + * Generates fingerprint values. + *

    + * Generates fingerprint values of `data`. + *

    + * Fingerprint op considers the first dimension of `data` as the batch dimension, + * and `output[i]` contains the fingerprint value generated from contents in + * `data[i, ...]` for all `i`. + *

    + * Fingerprint op writes fingerprint values as byte arrays. For example, the + * default method `farmhash64` generates a 64-bit fingerprint value at a time. + * This 8-byte value is written out as an `uint8` array of size 8, in little-endian + * order. + *

    + * For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), + * and that the fingerprint method is `farmhash64`. In this case, the output shape + * is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of + * each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in + * `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers + * in `data[1, :, :]`. + *

    + * Note that this op fingerprints the raw underlying buffer, and it does not + * fingerprint Tensor's metadata such as data type and/or shape. For example, the + * fingerprint values are invariant under reshapes and bitcasts as long as the + * batch dimension remain the same: + *

    {@code
    +   *  Fingerprint(data) == Fingerprint(Reshape(data, ...))
    +   *  Fingerprint(data) == Fingerprint(Bitcast(data, ...))
    +   *  }
    + * For string data, one should expect `Fingerprint(data) != + * Fingerprint(ReduceJoin(data))` in general. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data Must have rank 1 or higher. + * @param method Fingerprint method used by this op. Currently available method is + * `farmhash::fingerprint64`. + * @return a new instance of Fingerprint */ - public Constant constant(int[] data) { - return Constant.create(scope, data); + public Fingerprint fingerprint(Operand data, Operand method) { + return Fingerprint.create(scope, data, method); } /** - * Creates a rank-2 constant of {@code boolean} elements. + * Gather slices from `params` axis `axis` according to `indices`. + *

    + * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `params.shape[:axis] + indices.shape + + * params.shape[axis + 1:]` where: + *

    {@code
    +   *      # Scalar indices (output is rank(params) - 1).
    +   *      output[a_0, ..., a_n, b_0, ..., b_n] =
    +   *        params[a_0, ..., a_n, indices, b_0, ..., b_n]
        *
    -   * @param scope is a scope used to add the underlying operation.
    -   * @param data An array containing the values to put into the new constant. The dimensions of the
    -   *      new constant will match those of the array.
    +   *      # Vector indices (output is rank(params)).
    +   *      output[a_0, ..., a_n, i, b_0, ..., b_n] =
    +   *        params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
    +   *
    +   *      # Higher rank indices (output is rank(params) + rank(indices) - 1).
    +   *      output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
    +   *        params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
    +   *  }
    + *
    + * + *
    + *

    + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, a 0 is stored in the + * corresponding output value. + *

    + * See also `tf.batch_gather` and `tf.gather_nd`. + * + * @param data type for {@code output()} output + * @param params The tensor from which to gather values. Must be at least rank + * `axis + 1`. + * @param indices Index tensor. Must be in range `[0, params.shape[axis])`. + * @param axis The axis in `params` to gather `indices` from. Defaults to the first + * dimension. Supports negative indexes. + * @param options carries optional attributes values + * @return a new instance of Gather */ - public Constant constant(boolean[][] data) { - return Constant.create(scope, data); + public Gather gather(Operand params, + Operand indices, Operand axis, Gather.Options... options) { + return Gather.create(scope, params, indices, axis, options); } /** - * Creates a rank-1 constant of {@code long} elements. + * Gather slices from `params` into a Tensor with shape specified by `indices`. + *

    + * `indices` is a K-dimensional integer tensor, best thought of as a + * (K-1)-dimensional tensor of indices into `params`, where each element defines a + * slice of `params`: + *

    + * output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] + *

    + * Whereas in `tf.gather` `indices` defines slices into the `axis` + * dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the + * first `N` dimensions of `params`, where `N = indices.shape[-1]`. + *

    + * The last dimension of `indices` can be at most the rank of + * `params`: + *

    + * indices.shape[-1] <= params.rank + *

    + * The last dimension of `indices` corresponds to elements + * (if `indices.shape[-1] == params.rank`) or slices + * (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` + * of `params`. The output tensor has shape + *

    + * indices.shape[:-1] + params.shape[indices.shape[-1]:] + *

    + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, a 0 is stored in the + * corresponding output value. + *

    + * Some examples below. + *

    + * Simple indexing into a matrix: + *

    {@code
    +   *      indices = [[0, 0], [1, 1]]
    +   *      params = [['a', 'b'], ['c', 'd']]
    +   *      output = ['a', 'd']
    +   *  }
    + * Slice indexing into a matrix: + *
    {@code
    +   *      indices = [[1], [0]]
    +   *      params = [['a', 'b'], ['c', 'd']]
    +   *      output = [['c', 'd'], ['a', 'b']]
    +   *  }
    + * Indexing into a 3-tensor: + *
    {@code
    +   *      indices = [[1]]
    +   *      params = [[['a0', 'b0'], ['c0', 'd0']],
    +   *                [['a1', 'b1'], ['c1', 'd1']]]
    +   *      output = [[['a1', 'b1'], ['c1', 'd1']]]
        *
    -   * @param scope is a scope used to add the underlying operation.
    -   * @param data An array containing the values to put into the new constant. The dimensions of the
    -   *      new constant will match those of the array.
    +   *
    +   *      indices = [[0, 1], [1, 0]]
    +   *      params = [[['a0', 'b0'], ['c0', 'd0']],
    +   *                [['a1', 'b1'], ['c1', 'd1']]]
    +   *      output = [['c0', 'd0'], ['a1', 'b1']]
    +   *
    +   *
    +   *      indices = [[0, 0, 1], [1, 0, 1]]
    +   *      params = [[['a0', 'b0'], ['c0', 'd0']],
    +   *                [['a1', 'b1'], ['c1', 'd1']]]
    +   *      output = ['b0', 'b1']
    +   *  }
    + * Batched indexing into a matrix: + *
    {@code
    +   *      indices = [[[0, 0]], [[0, 1]]]
    +   *      params = [['a', 'b'], ['c', 'd']]
    +   *      output = [['a'], ['b']]
    +   *  }
    + * Batched slice indexing into a matrix: + *
    {@code
    +   *      indices = [[[1]], [[0]]]
    +   *      params = [['a', 'b'], ['c', 'd']]
    +   *      output = [[['c', 'd']], [['a', 'b']]]
    +   *  }
    + * Batched indexing into a 3-tensor: + *
    {@code
    +   *      indices = [[[1]], [[0]]]
    +   *      params = [[['a0', 'b0'], ['c0', 'd0']],
    +   *                [['a1', 'b1'], ['c1', 'd1']]]
    +   *      output = [[[['a1', 'b1'], ['c1', 'd1']]],
    +   *                [[['a0', 'b0'], ['c0', 'd0']]]]
    +   *
    +   *      indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
    +   *      params = [[['a0', 'b0'], ['c0', 'd0']],
    +   *                [['a1', 'b1'], ['c1', 'd1']]]
    +   *      output = [[['c0', 'd0'], ['a1', 'b1']],
    +   *                [['a0', 'b0'], ['c1', 'd1']]]
    +   *
    +   *
    +   *      indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
    +   *      params = [[['a0', 'b0'], ['c0', 'd0']],
    +   *                [['a1', 'b1'], ['c1', 'd1']]]
    +   *      output = [['b0', 'b1'], ['d0', 'c1']]
    +   *  }
    + * See also `tf.gather` and `tf.batch_gather`. + * + * @param data type for {@code output()} output + * @param params The tensor from which to gather values. + * @param indices Index tensor. + * @return a new instance of GatherNd */ - public Constant constant(long[] data) { - return Constant.create(scope, data); + public GatherNd gatherNd(Operand params, + Operand indices) { + return GatherNd.create(scope, params, indices); } /** - * Creates a rank-2 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Store the input tensor in the state of the current session. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param value The tensor to be stored. + * @return a new instance of GetSessionHandle */ - public Constant constant(byte[][][] data) { - return Constant.create(scope, data); + public GetSessionHandle getSessionHandle(Operand value) { + return GetSessionHandle.create(scope, value); } /** - * Creates a rank-2 constant of {@code float} elements. + * Get the value of the tensor specified by its handle. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code value()} output + * @param handle The handle for a tensor stored in the session state. + * @param dtype The type of the output value. + * @return a new instance of GetSessionTensor */ - public Constant constant(float[][] data) { - return Constant.create(scope, data); + public GetSessionTensor getSessionTensor(Operand handle, + DataType dtype) { + return GetSessionTensor.create(scope, handle, dtype); } /** - * Creates a rank-4 constant of {@code double} elements. 
+ * Adds gradients computation ops to the graph according to scope. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param scope current graph scope + * @param y outputs of the function to derive + * @param x inputs of the function for which partial derivatives are computed + * @param options carries optional attributes values + * @return a new instance of {@code Gradients} + * @throws IllegalArgumentException if execution environment is not a graph */ - public Constant constant(double[][][][] data) { - return Constant.create(scope, data); + public Gradients gradients(Iterable> y, Iterable> x, + Gradients.Options... options) { + return Gradients.create(scope, y, x, options); } /** - * Creates a constant containing a single {@code float} element. + * Adds operations to compute the partial derivatives of sum of {@code y}s w.r.t {@code x}s, + * i.e., {@code d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...} + *

    + * If {@code Options.dx()} values are set, they are as the initial symbolic partial derivatives of some loss + * function {@code L} w.r.t. {@code y}. {@code Options.dx()} must have the size of {@code y}. + *

    + * If {@code Options.dx()} is not set, the implementation will use dx of {@code OnesLike} for all + * shapes in {@code y}. + *

    + * The partial derivatives are returned in output {@code dy}, with the size of {@code x}. + *

    + * Example of usage: + *

    {@code
    +   *  Gradients gradients = tf.gradients(loss, Arrays.asList(w, b));
    +   *  Scalar alpha = ops.scalar(1.0f);
    +   *  tf.train.applyGradientDescent(w, alpha, gradients.dy(0));
    +   *  tf.train.applyGradientDescent(b, alpha, gradients.dy(1));
    +   *  }
    * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a float constant + * @param y output of the function to derive + * @param x inputs of the function for which partial derivatives are computed + * @param options carries optional attributes values + * @return a new instance of {@code Gradients} + * @throws IllegalArgumentException if execution environment is not a graph */ - public Constant constant(float data) { - return Constant.create(scope, data); + public Gradients gradients(Operand y, Iterable> x, + Gradients.Options... options) { + return Gradients.create(scope, y, x, options); } /** - * Creates a rank-1 constant of {@code float} elements. + * Gives a guarantee to the TF runtime that the input tensor is a constant. + *

    + * The runtime is then free to make optimizations based on this. + *

    + * Only accepts value typed tensors as inputs and rejects resource variable handles + * as input. + *

    + * Returns the input tensor without modification. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code output()} output + * @param input + * @return a new instance of GuaranteeConst */ - public Constant constant(float[] data) { - return Constant.create(scope, data); + public GuaranteeConst guaranteeConst(Operand input) { + return GuaranteeConst.create(scope, input); } /** - * Creates a rank-4 constant of {@code long} elements. + * Creates a non-initialized hash table. + *

    + * This op creates a hash table, specifying the type of its keys and values. + * Before using the table you will have to initialize it. After initialization the + * table will be immutable. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of HashTable */ - public Constant constant(long[][][][] data) { - return Constant.create(scope, data); + public HashTable hashTable(DataType keyDtype, + DataType valueDtype, HashTable.Options... options) { + return HashTable.create(scope, keyDtype, valueDtype, options); } /** - * Creates a constant containing a single {@code double} element. + * Return histogram of values. + *

    + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. The bins are + * equal width and determined by the arguments `value_range` and `nbins`. + *

    {@code
    +   *  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    +   *  nbins = 5
    +   *  value_range = [0.0, 5.0]
    +   *  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
        *
    -   * @param scope is a scope used to add the underlying operation.
    -   * @param data The value to put into the new constant.
    -   * @return a double constant
    +   *  with tf.get_default_session() as sess:
    +   *    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
    +   *    variables.global_variables_initializer().run()
    +   *    sess.run(hist) => [2, 1, 1, 0, 2]
    +   *  }
    + * + * @param data type for {@code out()} output + * @param values Numeric `Tensor`. + * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. + * @return a new instance of HistogramFixedWidth */ - public Constant constant(double data) { - return Constant.create(scope, data); + public HistogramFixedWidth histogramFixedWidth(Operand values, + Operand valueRange, Operand nbins) { + return HistogramFixedWidth.create(scope, values, valueRange, nbins); } /** - * Creates a rank-2 constant of {@code int} elements. + * Return histogram of values. + *

    + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. The bins are + * equal width and determined by the arguments `value_range` and `nbins`. + *

    {@code
    +   *  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    +   *  nbins = 5
    +   *  value_range = [0.0, 5.0]
    +   *  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
        *
    -   * @param scope is a scope used to add the underlying operation.
    -   * @param data An array containing the values to put into the new constant. The dimensions of the
    -   *      new constant will match those of the array.
    +   *  with tf.get_default_session() as sess:
    +   *    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
    +   *    variables.global_variables_initializer().run()
    +   *    sess.run(hist) => [2, 1, 1, 0, 2]
    +   *  }
    + * + * @param data type for {@code out()} output + * @param values Numeric `Tensor`. + * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. + * @param dtype + * @return a new instance of HistogramFixedWidth */ - public Constant constant(int[][] data) { - return Constant.create(scope, data); + public HistogramFixedWidth histogramFixedWidth( + Operand values, Operand valueRange, Operand nbins, DataType dtype) { + return HistogramFixedWidth.create(scope, values, valueRange, nbins, dtype); } /** - * Creates a rank-5 constant of {@code float} elements. + * Return a tensor with the same shape and contents as the input tensor or value. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code output()} output + * @param input + * @return a new instance of Identity */ - public Constant constant(float[][][][][] data) { - return Constant.create(scope, data); + public Identity identity(Operand input) { + return Identity.create(scope, input); } /** - * Creates a rank-5 constant of {@code double} elements. + * Returns a list of tensors with the same shapes and contents as the input + *

    + * tensors. + *

    + * This op can be used to override the gradient for complicated functions. For + * example, suppose y = f(x) and we wish to apply a custom function g for backprop + * such that dx = g(dy). In Python, + *

    {@code
    +   *  with tf.get_default_graph().gradient_override_map(
    +   *      {'IdentityN': 'OverrideGradientWithG'}):
    +   *    y, _ = identity_n([f(x), x])
        *
    -   * @param scope is a scope used to add the underlying operation.
    -   * @param data An array containing the values to put into the new constant. The dimensions of the
    -   *      new constant will match those of the array.
    +   * @tf.RegisterGradient('OverrideGradientWithG') def ApplyG(op, dy, _):
    +   *    return [None, g(dy)]  # Do not backprop to f(x).
    +   *  }
    + * @param input + * @return a new instance of IdentityN */ - public Constant constant(double[][][][][] data) { - return Constant.create(scope, data); + public IdentityN identityN(Iterable> input) { + return IdentityN.create(scope, input); } /** - * Creates a {@code String} constant using the default, UTF-8 encoding. + * Returns immutable tensor from memory region. + *

    + * The current implementation memmaps the tensor from a file. * - * @param scope is a scope used to add the underlying operation. - * @param data The string to put into the new constant. - * @return a string constant + * @param data type for {@code tensor()} output + * @param dtype Type of the returned tensor. + * @param shape Shape of the returned tensor. + * @param memoryRegionName Name of readonly memory region used by the tensor, see + * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. + * @return a new instance of ImmutableConst */ - public Constant constant(String data) { - return Constant.create(scope, data); + public ImmutableConst immutableConst(DataType dtype, Shape shape, + String memoryRegionName) { + return ImmutableConst.create(scope, dtype, shape, memoryRegionName); } /** - * Creates a rank-3 constant of {@code boolean} elements. + * Table initializer that takes two tensors for keys and values respectively. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param tableHandle Handle to a table which will be initialized. + * @param keys Keys of type Tkey. + * @param values Values of type Tval. + * @return a new instance of InitializeTable */ - public Constant constant(boolean[][][] data) { - return Constant.create(scope, data); + public InitializeTable initializeTable(Operand tableHandle, + Operand keys, Operand values) { + return InitializeTable.create(scope, tableHandle, keys, values); } /** - * Creates a rank-3 constant of {@code float} elements. + * Initializes a table from a text file. + *

    + * It inserts one key-value pair into the table for each line of the file. + * The key and value is extracted from the whole line content, elements from the + * split line based on `delimiter` or the line number (starting from zero). + * Where to extract the key and value from a line is specified by `key_index` and + * `value_index`. + *

    + * - A value of -1 means use the line number(starting from zero), expects `int64`. + * - A value of -2 means use the whole line content, expects `string`. + * - A value >= 0 means use the index (starting at zero) of the split line based + * on `delimiter`. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param tableHandle Handle to a table which will be initialized. + * @param filename Filename of a vocabulary text file. + * @param keyIndex Column index in a line to get the table `key` values from. + * @param valueIndex Column index that represents information of a line to get the table + * `value` values from. + * @param options carries optional attributes values + * @return a new instance of InitializeTableFromTextFile */ - public Constant constant(float[][][] data) { - return Constant.create(scope, data); + public InitializeTableFromTextFile initializeTableFromTextFile(Operand tableHandle, + Operand filename, Long keyIndex, Long valueIndex, + InitializeTableFromTextFile.Options... options) { + return InitializeTableFromTextFile.create(scope, tableHandle, filename, keyIndex, valueIndex, options); } /** - * Creates a rank-5 constant of {@code int} elements. + * Adds v into specified rows of x. + *

    + * Computes y = x; y[i, :] += v; return y. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. + * @param data type for {@code y()} output + * @param x A `Tensor` of type T. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. + * @return a new instance of InplaceAdd */ - public Constant constant(int[][][][][] data) { - return Constant.create(scope, data); + public InplaceAdd inplaceAdd(Operand x, Operand i, Operand v) { + return InplaceAdd.create(scope, x, i, v); } /** - * Creates a constant containing a single {@code long} element. + * Subtracts `v` into specified rows of `x`. + *

    + * Computes y = x; y[i, :] -= v; return y. * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a long constant + * @param data type for {@code y()} output + * @param x A `Tensor` of type T. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. + * @return a new instance of InplaceSub */ - public Constant constant(long data) { - return Constant.create(scope, data); + public InplaceSub inplaceSub(Operand x, Operand i, Operand v) { + return InplaceSub.create(scope, x, i, v); } /** - * Create a constant from a Tensor. + * Updates specified rows with values in `v`. + *

    + * Computes `x[i, :] = v; return x`. * - * @param scope is a scope used to add the underlying operation. - * @param tensor a Tensor holding the constant value - * @return a constant of the same data type as `tensor` + * @param data type for {@code y()} output + * @param x A tensor of type `T`. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. + * @return a new instance of InplaceUpdate */ - public Constant constant(Tensor tensor) { - return Constant.create(scope, tensor); + public InplaceUpdate inplaceUpdate(Operand x, Operand i, + Operand v) { + return InplaceUpdate.create(scope, x, i, v); } /** - * Creates a {@code String} constant using a specified encoding. + * Checks whether a tensor has been initialized. + *

    + * Outputs boolean scalar indicating whether the tensor has been initialized. * - * @param scope is a scope used to add the underlying operation. - * @param charset The encoding from String to bytes. - * @param data The string to put into the new constant. - * @return a string constant + * @param ref Should be from a `Variable` node. May be uninitialized. + * @return a new instance of IsVariableInitialized */ - public Constant constant(String data, Charset charset) { - return Constant.create(scope, data, charset); + public IsVariableInitialized isVariableInitialized(Operand ref) { + return IsVariableInitialized.create(scope, ref); } /** - * Create a constant from a Java object. - * - *

    The argument {@code object} is first converted into a Tensor using {@link - * org.tensorflow.Tensor#create(Object)}, so only Objects supported by this method must be - * provided. For example: - * + * Generates values in an interval. + *

    + * A sequence of `num` evenly-spaced values are generated beginning at `start`. + * If `num > 1`, the values in the sequence increase by `stop - start / num - 1`, + * so that the last one is exactly `stop`. + *

    + * For example: *

    {@code
    -   *  Constant.create(scope, new int[]{{1, 2}, {3, 4}}, TInt32.DTYPE); // returns a 2x2 integer matrix
    +   *  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
        *  }
    * - * @param scope is a scope used to add the underlying operation. - * @param object a Java object representing the constant. - * @return a constant of type `type` - * @see org.tensorflow.Tensor#create(Object) Tensor.create + * @param data type for {@code output()} output + * @param start 0-D tensor. First entry in the range. + * @param stop 0-D tensor. Last entry in the range. + * @param num 0-D tensor. Number of values to generate. + * @return a new instance of LinSpace */ - public Constant constant(Object object, DataType type) { - return Constant.create(scope, object, type); + public LinSpace linSpace(Operand start, + Operand stop, Operand num) { + return LinSpace.create(scope, start, stop, num); } /** - * Create a {@link TFloat64} constant with data from the given buffer. - * - *

    Creates a constant with the given shape by copying elements from the buffer (starting from - * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents - * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this - * method. + * Outputs all keys and values in the table. * - * @param scope is a scope used to add the underlying operation. - * @param shape the tensor shape. - * @param data a buffer containing the tensor data. - * @return a double constant - * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @param data type for {@code keys()} output + * @param data type for {@code values()} output + * @param tableHandle Handle to the table. + * @param Tkeys + * @param Tvalues + * @return a new instance of LookupTableExport */ - public Constant constant(long[] shape, DoubleBuffer data) { - return Constant.create(scope, shape, data); + public LookupTableExport lookupTableExport( + Operand tableHandle, DataType Tkeys, DataType Tvalues) { + return LookupTableExport.create(scope, tableHandle, Tkeys, Tvalues); } /** - * Create a {@link TInt32} constant with data from the given buffer. - * - *

    Creates a constant with the given shape by copying elements from the buffer (starting from - * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents - * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this - * method. + * Looks up keys in a table, outputs the corresponding values. + *

    + * The tensor `keys` must of the same type as the keys of the table. + * The output `values` is of the type of the table values. + *

    + * The scalar `default_value` is the value output for keys not present in the + * table. It must also be of the same type as the table values. * - * @param scope is a scope used to add the underlying operation. - * @param shape the tensor shape. - * @param data a buffer containing the tensor data. - * @return an integer constant - * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @param data type for {@code values()} output + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys to look up. + * @param defaultValue + * @return a new instance of LookupTableFind */ - public Constant constant(long[] shape, IntBuffer data) { - return Constant.create(scope, shape, data); + public LookupTableFind lookupTableFind( + Operand tableHandle, Operand keys, Operand defaultValue) { + return LookupTableFind.create(scope, tableHandle, keys, defaultValue); } /** - * Create a {@link TInt64} constant with data from the given buffer. - * - *

    Creates a constant with the given shape by copying elements from the buffer (starting from - * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents - * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this - * method. + * Replaces the contents of the table with the specified keys and values. + *

    + * The tensor `keys` must be of the same type as the keys of the table. + * The tensor `values` must be of the type of the table values. * - * @param scope is a scope used to add the underlying operation. - * @param shape the tensor shape. - * @param data a buffer containing the tensor data. - * @return a long constant - * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys to look up. + * @param values Values to associate with keys. + * @return a new instance of LookupTableImport */ - public Constant constant(long[] shape, LongBuffer data) { - return Constant.create(scope, shape, data); + public LookupTableImport lookupTableImport( + Operand tableHandle, Operand keys, Operand values) { + return LookupTableImport.create(scope, tableHandle, keys, values); } /** - * Create a {@link TFloat32} constant with data from the given buffer. - * - *

    Creates a constant with the given shape by copying elements from the buffer (starting from - * its current position) into the tensor. For example, if {@code shape = {2,3} } (which represents - * a 2x3 matrix) then the buffer must have 6 elements remaining, which will be consumed by this - * method. + * Updates the table to associates keys with values. + *

    + * The tensor `keys` must be of the same type as the keys of the table. + * The tensor `values` must be of the type of the table values. * - * @param scope is a scope used to add the underlying operation. - * @param shape the tensor shape. - * @param data a buffer containing the tensor data. - * @return a float constant - * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys to look up. + * @param values Values to associate with keys. + * @return a new instance of LookupTableInsert */ - public Constant constant(long[] shape, FloatBuffer data) { - return Constant.create(scope, shape, data); + public LookupTableInsert lookupTableInsert( + Operand tableHandle, Operand keys, Operand values) { + return LookupTableInsert.create(scope, tableHandle, keys, values); } /** - * Create a constant with data from the given buffer. - * - *

    Creates a Constant with the provided shape of any type where the constant data has been - * encoded into {@code data} as per the specification of the TensorFlow C - * API. + * Computes the number of elements in the given table. * - * @param scope is a scope used to add the underlying operation. - * @param type the tensor datatype. - * @param shape the tensor shape. - * @param data a buffer containing the tensor data. - * @return a constant of type `type` - * @throws IllegalArgumentException If the tensor datatype or shape is not compatible with the - * buffer + * @param tableHandle Handle to the table. + * @return a new instance of LookupTableSize */ - public Constant constant(DataType type, long[] shape, ByteBuffer data) { - return Constant.create(scope, type, shape, data); + public LookupTableSize lookupTableSize(Operand tableHandle) { + return LookupTableSize.create(scope, tableHandle); } /** - * This op consumes a lock created by `MutexLock`. - *

    - * This op exists to consume a tensor created by `MutexLock` (other than - * direct control dependencies). It should be the only that consumes the tensor, - * and will raise an error if it is not. Its only purpose is to keep the - * mutex lock tensor alive until it is consumed by this op. + * Forwards the input to the output. *

    - * NOTE: This operation must run on the same device as its input. This may - * be enforced via the `colocate_with` mechanism. + * This operator represents the loop termination condition used by the + * "pivot" switches of a loop. * - * @param mutexLock A tensor returned by `MutexLock`. - * @return a new instance of ConsumeMutexLock + * @param input A boolean scalar, representing the branch predicate of the Switch op. + * @return a new instance of LoopCond */ - public ConsumeMutexLock consumeMutexLock(Operand mutexLock) { - return ConsumeMutexLock.create(scope, mutexLock); + public LoopCond loopCond(Operand input) { + return LoopCond.create(scope, input); } /** - * Does nothing. Serves as a control trigger for scheduling. - *

    - * Only useful as a placeholder for control edges. + * Op removes all elements in the underlying container. * - * @return a new instance of ControlTrigger + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapClear */ - public ControlTrigger controlTrigger() { - return ControlTrigger.create(scope); + public MapClear mapClear(List> dtypes, MapClear.Options... options) { + return MapClear.create(scope, dtypes, options); } /** - * Increments 'ref' until it reaches 'limit'. + * Op returns the number of incomplete elements in the underlying container. * - * @param data type for {@code output()} output - * @param ref Should be from a scalar `Variable` node. - * @param limit If incrementing ref would bring it above limit, instead generates an - * 'OutOfRange' error. - * @return a new instance of CountUpTo + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapIncompleteSize */ - public CountUpTo countUpTo(Operand ref, Long limit) { - return CountUpTo.create(scope, ref, limit); + public MapIncompleteSize mapIncompleteSize(List> dtypes, + MapIncompleteSize.Options... options) { + return MapIncompleteSize.create(scope, dtypes, options); } /** - * Makes a copy of `x`. + * Op peeks at the values at the specified key. If the + *

    + * underlying container does not contain this key + * this op will block until it does. * - * @param data type for {@code y()} output - * @param x The source tensor of type `T`. - * @return a new instance of DeepCopy + * @param key + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapPeek */ - public DeepCopy deepCopy(Operand x) { - return DeepCopy.create(scope, x); + public MapPeek mapPeek(Operand key, Operand indices, List> dtypes, + MapPeek.Options... options) { + return MapPeek.create(scope, key, indices, dtypes, options); } /** - * Delete the tensor specified by its handle in the session. + * Op returns the number of elements in the underlying container. * - * @param handle The handle for a tensor stored in the session state. - * @return a new instance of DeleteSessionTensor + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapSize */ - public DeleteSessionTensor deleteSessionTensor(Operand handle) { - return DeleteSessionTensor.create(scope, handle); + public MapSize mapSize(List> dtypes, MapSize.Options... options) { + return MapSize.create(scope, dtypes, options); } /** - * Deletes the resource specified by the handle. - *

    - * All subsequent operations using the resource will result in a NotFound - * error status. + * Stage (key, values) in the underlying container which behaves like a hashtable. * - * @param resource handle to the resource to delete. + * @param key int64 + * @param indices + * @param values a list of tensors + * dtypes A list of data types that inserted values should adhere to. + * @param dtypes * @param options carries optional attributes values - * @return a new instance of DestroyResourceOp + * @return a new instance of MapStage */ - public DestroyResourceOp destroyResourceOp(Operand resource, - DestroyResourceOp.Options... options) { - return DestroyResourceOp.create(scope, resource, options); + public MapStage mapStage(Operand key, Operand indices, + Iterable> values, List> dtypes, MapStage.Options... options) { + return MapStage.create(scope, key, indices, values, dtypes, options); } /** - * Destroys the temporary variable and returns its final value. + * Op removes and returns the values associated with the key *

    - * Sets output to the value of the Tensor pointed to by 'ref', then destroys - * the temporary variable called 'var_name'. - * All other uses of 'ref' must have executed before this op. - * This is typically achieved by chaining the ref through each assign op, or by - * using control dependencies. - *

    - * Outputs the final value of the tensor pointed to by 'ref'. + * from the underlying container. If the underlying container + * does not contain this key, the op will block until it does. * - * @param data type for {@code value()} output - * @param ref A reference to the temporary variable tensor. - * @param varName Name of the temporary variable, usually the name of the matching - * 'TemporaryVariable' op. - * @return a new instance of DestroyTemporaryVariable + * @param key + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapUnstage */ - public DestroyTemporaryVariable destroyTemporaryVariable(Operand ref, - String varName) { - return DestroyTemporaryVariable.create(scope, ref, varName); + public MapUnstage mapUnstage(Operand key, Operand indices, + List> dtypes, MapUnstage.Options... options) { + return MapUnstage.create(scope, key, indices, dtypes, options); } /** - * Partitions `data` into `num_partitions` tensors using indices from `partitions`. - *

    - * For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` - * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i` - * are placed in `outputs[i]` in lexicographic order of `js`, and the first - * dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`. - * In detail, - *

    {@code
    -   *      outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
    -   *
    -   *      outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
    -   *  }
    - * `data.shape` must start with `partitions.shape`. - *

    - * For example: - *

    {@code
    -   *      # Scalar partitions.
    -   *      partitions = 1
    -   *      num_partitions = 2
    -   *      data = [10, 20]
    -   *      outputs[0] = []  # Empty with shape [0, 2]
    -   *      outputs[1] = [[10, 20]]
    -   *
    -   *      # Vector partitions.
    -   *      partitions = [0, 0, 1, 1, 0]
    -   *      num_partitions = 2
    -   *      data = [10, 20, 30, 40, 50]
    -   *      outputs[0] = [10, 20, 50]
    -   *      outputs[1] = [30, 40]
    -   *  }
    - * See `dynamic_stitch` for an example on how to merge partitions back. + * Op removes and returns a random (key, value) *

    - *

    - * - *
    + * from the underlying container. If the underlying container + * does not contain elements, the op will block until it does. * - * @param data type for {@code outputs()} output - * @param data - * @param partitions Any shape. Indices in the range `[0, num_partitions)`. - * @param numPartitions The number of partitions to output. - * @return a new instance of DynamicPartition + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapUnstageNoKey */ - public DynamicPartition dynamicPartition(Operand data, - Operand partitions, Long numPartitions) { - return DynamicPartition.create(scope, data, partitions, numPartitions); + public MapUnstageNoKey mapUnstageNoKey(Operand indices, List> dtypes, + MapUnstageNoKey.Options... options) { + return MapUnstageNoKey.create(scope, indices, dtypes, options); } /** - * Interleave the values from the `data` tensors into a single tensor. - *

    - * Builds a merged tensor such that - *

    {@code
    -   *      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
    -   *  }
    - * For example, if each `indices[m]` is scalar or vector, we have - *
    {@code
    -   *      # Scalar indices:
    -   *      merged[indices[m], ...] = data[m][...]
    -   *
    -   *      # Vector indices:
    -   *      merged[indices[m][i], ...] = data[m][i, ...]
    -   *  }
    - * Each `data[i].shape` must start with the corresponding `indices[i].shape`, - * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we - * must have `data[i].shape = indices[i].shape + constant`. In terms of this - * `constant`, the output shape is - *

    - * merged.shape = [max(indices)] + constant - *

    - * Values are merged in order, so if an index appears in both `indices[m][i]` and - * `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the - * merged result. If you do not need this guarantee, ParallelDynamicStitch might - * perform better on some devices. + * Computes the maximum of elements across dimensions of a tensor. *

    - * For example: - *

    {@code
    -   *      indices[0] = 6
    -   *      indices[1] = [4, 1]
    -   *      indices[2] = [[5, 2], [0, 3]]
    -   *      data[0] = [61, 62]
    -   *      data[1] = [[41, 42], [11, 12]]
    -   *      data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
    -   *      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
    -   *                [51, 52], [61, 62]]
    -   *  }
    - * This method can be used to merge partitions created by `dynamic_partition` - * as illustrated on the following example: - *
    {@code
    -   *      # Apply function (increments x_i) on elements for which a certain condition
    -   *      # apply (x_i != -1 in this example).
    -   *      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
    -   *      condition_mask=tf.not_equal(x,tf.constant(-1.))
    -   *      partitioned_data = tf.dynamic_partition(
    -   *          x, tf.cast(condition_mask, tf.int32) , 2)
    -   *      partitioned_data[1] = partitioned_data[1] + 1.0
    -   *      condition_indices = tf.dynamic_partition(
    -   *          tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
    -   *      x = tf.dynamic_stitch(condition_indices, partitioned_data)
    -   *      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
    -   *      # unchanged.
    -   *  }
    - *
    - * - *
    + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param data type for {@code merged()} output - * @param indices - * @param data - * @return a new instance of DynamicStitch + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of Max */ - public DynamicStitch dynamicStitch(Iterable> indices, - Iterable> data) { - return DynamicStitch.create(scope, indices, data); + public Max max(Operand input, Operand axis, + Max.Options... options) { + return Max.create(scope, input, axis, options); } /** - * Computes the (possibly normalized) Levenshtein Edit Distance. + * Forwards the value of an available tensor from `inputs` to `output`. *

    - * The inputs are variable-length sequences provided by SparseTensors - * (hypothesis_indices, hypothesis_values, hypothesis_shape) - * and - * (truth_indices, truth_values, truth_shape). + * `Merge` waits for at least one of the tensors in `inputs` to become available. + * It is usually combined with `Switch` to implement branching. *

    - * The inputs are: + * `Merge` forwards the first tensor to become available to `output`, and sets + * `value_index` to its index in `inputs`. * - * @param hypothesisIndices The indices of the hypothesis list SparseTensor. - * This is an N x R int64 matrix. - * @param hypothesisValues The values of the hypothesis list SparseTensor. - * This is an N-length vector. - * @param hypothesisShape The shape of the hypothesis list SparseTensor. - * This is an R-length vector. - * @param truthIndices The indices of the truth list SparseTensor. - * This is an M x R int64 matrix. - * @param truthValues The values of the truth list SparseTensor. - * This is an M-length vector. - * @param truthShape truth indices, vector. - * @param options carries optional attributes values - * @return a new instance of EditDistance + * @param data type for {@code output()} output + * @param inputs The input tensors, exactly one of which will become available. + * @return a new instance of Merge */ - public EditDistance editDistance(Operand hypothesisIndices, - Operand hypothesisValues, Operand hypothesisShape, Operand truthIndices, - Operand truthValues, Operand truthShape, EditDistance.Options... options) { - return EditDistance.create(scope, hypothesisIndices, hypothesisValues, hypothesisShape, truthIndices, truthValues, truthShape, options); + public Merge merge(Iterable> inputs) { + return Merge.create(scope, inputs); } /** - * Creates a tensor with the given shape. + * Computes the minimum of elements across dimensions of a tensor. *

    - * This operation creates a tensor of `shape` and `dtype`. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * * @param data type for {@code output()} output - * @param shape 1-D. Represents the shape of the output tensor. - * @param dtype + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. * @param options carries optional attributes values - * @return a new instance of Empty + * @return a new instance of Min */ - public Empty empty(Operand shape, DataType dtype, - Empty.Options... options) { - return Empty.create(scope, shape, dtype, options); + public Min min(Operand input, Operand axis, + Min.Options... options) { + return Min.create(scope, input, axis, options); } /** - * Creates and returns an empty tensor list. + * Pads a tensor with mirrored values. *

    - * All list elements must be tensors of dtype element_dtype and shape compatible - * with element_shape. + * This operation pads a `input` with mirrored values according to the `paddings` + * you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is + * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + * how many values to add before the contents of `input` in that dimension, and + * `paddings[D, 1]` indicates how many values to add after the contents of `input` + * in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater + * than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true + * (if false, respectively). *

    - * handle: an empty tensor list. - * element_dtype: the type of elements in the list. - * element_shape: a shape compatible with that of elements in the list. + * The padded size of each dimension D of the output is: + *

    + * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + *

    + * For example: + *

    {@code
    +   *  # 't' is [[1, 2, 3], [4, 5, 6]].
    +   *  # 'paddings' is [[1, 1]], [2, 2]].
    +   *  # 'mode' is SYMMETRIC.
    +   *  # rank of 't' is 2.
    +   *  pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
    +   *                        [2, 1, 1, 2, 3, 3, 2]
    +   *                        [5, 4, 4, 5, 6, 6, 5]
    +   *                        [5, 4, 4, 5, 6, 6, 5]]
    +   *  }
    * - * @param elementShape - * @param maxNumElements - * @param elementDtype - * @return a new instance of EmptyTensorList + * @param data type for {@code output()} output + * @param input The input tensor to be padded. + * @param paddings A two-column matrix specifying the padding sizes. The number of + * rows must be the same as the rank of `input`. + * @param mode Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions + * do not include the borders, while in symmetric mode the padded regions + * do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` + * is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and + * it is `[1, 2, 3, 3, 2]` in symmetric mode. + * @return a new instance of MirrorPad */ - public EmptyTensorList emptyTensorList( - Operand elementShape, Operand maxNumElements, DataType elementDtype) { - return EmptyTensorList.create(scope, elementShape, maxNumElements, elementDtype); + public MirrorPad mirrorPad(Operand input, + Operand paddings, String mode) { + return MirrorPad.create(scope, input, paddings, mode); } /** - * Ensures that the tensor's shape matches the expected shape. + * Wraps an arbitrary MLIR computation expressed as a module with a main() function. *

    - * Raises an error if the input tensor's shape does not match the specified shape. - * Returns the input tensor otherwise. - * - * @param data type for {@code output()} output - * @param input A tensor, whose shape is to be validated. - * @param shape The expected (possibly partially specified) shape of the input tensor. - * @return a new instance of EnsureShape + * This operation does not have an associated kernel and is not intended to be + * executed in a regular TensorFlow session. Instead it is intended to be used for + * testing or for special case where a user intends to pass custom MLIR computation + * through a TensorFlow graph with the intent of having custom tooling processing + * it downstream (when targeting a different environment, like TensorFlow lite for + * example). + * The MLIR module is expected to have a main() function that will be used as an + * entry point. The inputs to the operations will be passed as argument to the + * main() function and the returned values of the main function mapped to the + * outputs. + * Example usage: + *

    {@code
    +   *  import tensorflow as tf
    +   *  from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
    +   *
    +   *  mlir_module = '''python
    +   *  func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
    +   *     %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
    +   *     return %ret : tensor<10x10xf32>
    +   *  }
    +   *  '''
    +   *
    +   * @tf.function def foo(x, y):
    +   *    return = mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
    +   *
    +   *  graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
    +   *  }
    + * @param inputs + * @param mlirModule + * @param Toutputs + * @return a new instance of MlirPassthroughOp */ - public EnsureShape ensureShape(Operand input, Shape shape) { - return EnsureShape.create(scope, input, shape); + public MlirPassthroughOp mlirPassthroughOp(Iterable> inputs, String mlirModule, + List> Toutputs) { + return MlirPassthroughOp.create(scope, inputs, mlirModule, Toutputs); } /** - * Inserts a dimension of 1 into a tensor's shape. - *

    - * Given a tensor `input`, this operation inserts a dimension of 1 at the - * dimension index `axis` of `input`'s shape. The dimension index `axis` starts at - * zero; if you specify a negative number for `axis` it is counted backward from - * the end. - *

    - * This operation is useful if you want to add a batch dimension to a single - * element. For example, if you have a single image of shape `[height, width, - * channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, - * which will make the shape `[1, height, width, channels]`. - *

    - * Other examples: - *

    {@code
    -   *  # 't' is a tensor of shape [2]
    -   *  shape(expand_dims(t, 0)) ==> [1, 2]
    -   *  shape(expand_dims(t, 1)) ==> [2, 1]
    -   *  shape(expand_dims(t, -1)) ==> [2, 1]
    -   *
    -   *  # 't2' is a tensor of shape [2, 3, 5]
    -   *  shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
    -   *  shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
    -   *  shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
    -   *  }
    - * This operation requires that: + * Creates an empty hash table that uses tensors as the backing store. *

    - * `-1-input.dims() <= dim <= input.dims()` + * It uses "open addressing" with quadratic reprobing to resolve + * collisions. *

    - * This operation is related to `squeeze()`, which removes dimensions of - * size 1. + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a scalar. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. * - * @param data type for {@code output()} output - * @param input - * @param axis 0-D (scalar). Specifies the dimension index at which to - * expand the shape of `input`. Must be in the range - * `[-rank(input) - 1, rank(input)]`. - * @return a new instance of ExpandDims + * @param emptyKey The key used to represent empty key buckets internally. Must not + * be used in insert or lookup operations. + * @param deletedKey + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of MutableDenseHashTable */ - public ExpandDims expandDims(Operand input, - Operand axis) { - return ExpandDims.create(scope, input, axis); + public MutableDenseHashTable mutableDenseHashTable( + Operand emptyKey, Operand deletedKey, DataType valueDtype, + MutableDenseHashTable.Options... options) { + return MutableDenseHashTable.create(scope, emptyKey, deletedKey, valueDtype, options); } /** - * Extract `patches` from `input` and put them in the "depth" output dimension. 3D extension of `extract_image_patches`. - * - * @param data type for {@code patches()} output - * @param input 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. - * @param ksizes The size of the sliding window for each dimension of `input`. - * @param strides 1-D of length 5. How far the centers of two consecutive patches are in - * `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. - * @param padding The type of padding algorithm to use. + * Creates an empty hash table. *

    - * We specify the size-related attributes as: - *

    {@code
    -   *        ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
    -   *        strides = [1, stride_planes, strides_rows, strides_cols, 1]
    -   *  }
    - * @return a new instance of ExtractVolumePatches + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a scalar. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of MutableHashTable */ - public ExtractVolumePatches extractVolumePatches(Operand input, - List ksizes, List strides, String padding) { - return ExtractVolumePatches.create(scope, input, ksizes, strides, padding); + public MutableHashTable mutableHashTable(DataType keyDtype, + DataType valueDtype, MutableHashTable.Options... options) { + return MutableHashTable.create(scope, keyDtype, valueDtype, options); } /** - * Creates a tensor filled with a scalar value. - *

    - * This operation creates a tensor of shape `dims` and fills it with `value`. + * Creates an empty hash table. *

    - * For example: - *

    {@code
    -   *  # Output tensor has shape [2, 3].
    -   *  fill([2, 3], 9) ==> [[9, 9, 9]
    -   *                       [9, 9, 9]]
    -   *  }
    - * `tf.fill` differs from `tf.constant` in a few ways: - *
      - *
    • - * `tf.fill` only supports scalar contents, whereas `tf.constant` supports - * Tensor values. - *
    • - *
    • - * `tf.fill` creates an Op in the computation graph that constructs the actual - * Tensor value at runtime. This is in contrast to `tf.constant` which embeds - * the entire Tensor into the graph with a `Const` node. - *
    • - *
    • - * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes - * based on other runtime Tensors, unlike `tf.constant`. + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a vector. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. * - * @param data type for {@code output()} output - * @param dims 1-D. Represents the shape of the output tensor. - * @param value 0-D (scalar). Value to fill the returned tensor. - *

      - * @compatibility(numpy) Equivalent to np.full - * @end_compatibility - * @return a new instance of Fill + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of MutableHashTableOfTensors */ - public Fill fill(Operand dims, Operand value) { - return Fill.create(scope, dims, value); + public MutableHashTableOfTensors mutableHashTableOfTensors( + DataType keyDtype, DataType valueDtype, MutableHashTableOfTensors.Options... options) { + return MutableHashTableOfTensors.create(scope, keyDtype, valueDtype, options); } /** - * Generates fingerprint values. - *

      - * Generates fingerprint values of `data`. - *

      - * Fingerprint op considers the first dimension of `data` as the batch dimension, - * and `output[i]` contains the fingerprint value generated from contents in - * `data[i, ...]` for all `i`. - *

      - * Fingerprint op writes fingerprint values as byte arrays. For example, the - * default method `farmhash64` generates a 64-bit fingerprint value at a time. - * This 8-byte value is written out as an `uint8` array of size 8, in little-endian - * order. - *

      - * For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), - * and that the fingerprint method is `farmhash64`. In this case, the output shape - * is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of - * each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in - * `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers - * in `data[1, :, :]`. - *

      - * Note that this op fingerprints the raw underlying buffer, and it does not - * fingerprint Tensor's metadata such as data type and/or shape. For example, the - * fingerprint values are invariant under reshapes and bitcasts as long as the - * batch dimension remain the same: - *

      {@code
      -   *  Fingerprint(data) == Fingerprint(Reshape(data, ...))
      -   *  Fingerprint(data) == Fingerprint(Bitcast(data, ...))
      -   *  }
      - * For string data, one should expect `Fingerprint(data) != - * Fingerprint(ReduceJoin(data))` in general. + * Creates a Mutex resource that can be locked by `MutexLock`. * - * @param data Must have rank 1 or higher. - * @param method Fingerprint method used by this op. Currently available method is - * `farmhash::fingerprint64`. - * @return a new instance of Fingerprint + * @param options carries optional attributes values + * @return a new instance of Mutex */ - public Fingerprint fingerprint(Operand data, Operand method) { - return Fingerprint.create(scope, data, method); + public Mutex mutex(Mutex.Options... options) { + return Mutex.create(scope, options); } /** - * Gather slices from `params` axis `axis` according to `indices`. + * Locks a mutex resource. The output is the lock. So long as the lock tensor *

      - * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). - * Produces an output tensor with shape `params.shape[:axis] + indices.shape + - * params.shape[axis + 1:]` where: + * is alive, any other request to use `MutexLock` with this mutex will wait. + *

      + * This is particularly useful for creating a critical section when used in + * conjunction with `MutexLockIdentity`: *

      {@code
      -   *      # Scalar indices (output is rank(params) - 1).
      -   *      output[a_0, ..., a_n, b_0, ..., b_n] =
      -   *        params[a_0, ..., a_n, indices, b_0, ..., b_n]
      +   *  mutex = mutex_v2(
      +   *    shared_name=handle_name, container=container, name=name)
          *
      -   *      # Vector indices (output is rank(params)).
      -   *      output[a_0, ..., a_n, i, b_0, ..., b_n] =
      -   *        params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
      +   *  def execute_in_critical_section(fn, *args, **kwargs):
      +   *    lock = gen_resource_variable_ops.mutex_lock(mutex)
          *
      -   *      # Higher rank indices (output is rank(params) + rank(indices) - 1).
      -   *      output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
      -   *        params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
      +   *    with ops.control_dependencies([lock]):
      +   *      r = fn(*args, **kwargs)
      +   *
      +   *    with ops.control_dependencies(nest.flatten(r)):
      +   *      with ops.colocate_with(mutex):
      +   *        ensure_lock_exists = mutex_lock_identity(lock)
      +   *
      +   *      # Make sure that if any element of r is accessed, all of
      +   *      # them are executed together.
      +   *      r = nest.map_structure(tf.identity, r)
      +   *
      +   *    with ops.control_dependencies([ensure_lock_exists]):
      +   *      return nest.map_structure(tf.identity, r)
          *  }
      - *
      - * - *
      + * While `fn` is running in the critical section, no other functions which wish to + * use this critical section may run. *

      - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, a 0 is stored in the - * corresponding output value. + * Often the use case is that two executions of the same graph, in parallel, + * wish to run `fn`; and we wish to ensure that only one of them executes + * at a time. This is especially important if `fn` modifies one or more + * variables at a time. *

      - * See also `tf.batch_gather` and `tf.gather_nd`. + * It is also useful if two separate functions must share a resource, but we + * wish to ensure the usage is exclusive. + * + * @param mutex The mutex resource to lock. + * @return a new instance of MutexLock + */ + public MutexLock mutexLock(Operand mutex) { + return MutexLock.create(scope, mutex); + } + + /** + * Makes its input available to the next iteration. * * @param data type for {@code output()} output - * @param params The tensor from which to gather values. Must be at least rank - * `axis + 1`. - * @param indices Index tensor. Must be in range `[0, params.shape[axis])`. - * @param axis The axis in `params` to gather `indices` from. Defaults to the first - * dimension. Supports negative indexes. - * @param options carries optional attributes values - * @return a new instance of Gather + * @param data The tensor to be made available to the next iteration. + * @return a new instance of NextIteration */ - public Gather gather(Operand params, - Operand indices, Operand axis, Gather.Options... options) { - return Gather.create(scope, params, indices, axis, options); + public NextIteration nextIteration(Operand data) { + return NextIteration.create(scope, data); } /** - * Gather slices from `params` into a Tensor with shape specified by `indices`. - *

      - * `indices` is a K-dimensional integer tensor, best thought of as a - * (K-1)-dimensional tensor of indices into `params`, where each element defines a - * slice of `params`: - *

      - * output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] - *

      - * Whereas in `tf.gather` `indices` defines slices into the `axis` - * dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the - * first `N` dimensions of `params`, where `N = indices.shape[-1]`. - *

      - * The last dimension of `indices` can be at most the rank of - * `params`: - *

      - * indices.shape[-1] <= params.rank + * Does nothing. Only useful as a placeholder for control edges. + * + * @return a new instance of NoOp + */ + public NoOp noOp() { + return NoOp.create(scope); + } + + /** + * Returns a one-hot tensor. *

      - * The last dimension of `indices` corresponds to elements - * (if `indices.shape[-1] == params.rank`) or slices - * (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` - * of `params`. The output tensor has shape + * The locations represented by indices in `indices` take value `on_value`, + * while all other locations take value `off_value`. *

      - * indices.shape[:-1] + params.shape[indices.shape[-1]:] + * If the input `indices` is rank `N`, the output will have rank `N+1`, + * The new axis is created at dimension `axis` (default: the new axis is + * appended at the end). *

      - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, a 0 is stored in the - * corresponding output value. + * If `indices` is a scalar the output shape will be a vector of length `depth`. *

      - * Some examples below. + * If `indices` is a vector of length `features`, the output shape will be: + *

      {@code
      +   *    features x depth if axis == -1
      +   *    depth x features if axis == 0
      +   *  }
      + * If `indices` is a matrix (batch) with shape `[batch, features]`, + * the output shape will be: + *
      {@code
      +   *    batch x features x depth if axis == -1
      +   *    batch x depth x features if axis == 1
      +   *    depth x batch x features if axis == 0
      +   *  }
      + * Examples + * ========= *

      - * Simple indexing into a matrix: + * Suppose that *

      {@code
      -   *      indices = [[0, 0], [1, 1]]
      -   *      params = [['a', 'b'], ['c', 'd']]
      -   *      output = ['a', 'd']
      +   *    indices = [0, 2, -1, 1]
      +   *    depth = 3
      +   *    on_value = 5.0
      +   *    off_value = 0.0
      +   *    axis = -1
          *  }
      - * Slice indexing into a matrix: + * Then output is `[4 x 3]`: *
      {@code
      -   *      indices = [[1], [0]]
      -   *      params = [['a', 'b'], ['c', 'd']]
      -   *      output = [['c', 'd'], ['a', 'b']]
      +   *  output =
      +   *    [5.0 0.0 0.0]  // one_hot(0)
      +   *    [0.0 0.0 5.0]  // one_hot(2)
      +   *    [0.0 0.0 0.0]  // one_hot(-1)
      +   *    [0.0 5.0 0.0]  // one_hot(1)
          *  }
      - * Indexing into a 3-tensor: + * Suppose that *
      {@code
      -   *      indices = [[1]]
      -   *      params = [[['a0', 'b0'], ['c0', 'd0']],
      -   *                [['a1', 'b1'], ['c1', 'd1']]]
      -   *      output = [[['a1', 'b1'], ['c1', 'd1']]]
      -   *
      -   *
      -   *      indices = [[0, 1], [1, 0]]
      -   *      params = [[['a0', 'b0'], ['c0', 'd0']],
      -   *                [['a1', 'b1'], ['c1', 'd1']]]
      -   *      output = [['c0', 'd0'], ['a1', 'b1']]
      -   *
      -   *
      -   *      indices = [[0, 0, 1], [1, 0, 1]]
      -   *      params = [[['a0', 'b0'], ['c0', 'd0']],
      -   *                [['a1', 'b1'], ['c1', 'd1']]]
      -   *      output = ['b0', 'b1']
      +   *    indices = [0, 2, -1, 1]
      +   *    depth = 3
      +   *    on_value = 0.0
      +   *    off_value = 3.0
      +   *    axis = 0
          *  }
      - * Batched indexing into a matrix: + * Then output is `[3 x 4]`: *
      {@code
      -   *      indices = [[[0, 0]], [[0, 1]]]
      -   *      params = [['a', 'b'], ['c', 'd']]
      -   *      output = [['a'], ['b']]
      +   *  output =
      +   *    [0.0 3.0 3.0 3.0]
      +   *    [3.0 3.0 3.0 0.0]
      +   *    [3.0 3.0 3.0 3.0]
      +   *    [3.0 0.0 3.0 3.0]
      +   *  //  ^                one_hot(0)
      +   *  //      ^            one_hot(2)
      +   *  //          ^        one_hot(-1)
      +   *  //              ^    one_hot(1)
          *  }
      - * Batched slice indexing into a matrix: + * Suppose that *
      {@code
      -   *      indices = [[[1]], [[0]]]
      -   *      params = [['a', 'b'], ['c', 'd']]
      -   *      output = [[['c', 'd']], [['a', 'b']]]
      +   *    indices = [[0, 2], [1, -1]]
      +   *    depth = 3
      +   *    on_value = 1.0
      +   *    off_value = 0.0
      +   *    axis = -1
          *  }
      - * Batched indexing into a 3-tensor: + * Then output is `[2 x 2 x 3]`: *
      {@code
      -   *      indices = [[[1]], [[0]]]
      -   *      params = [[['a0', 'b0'], ['c0', 'd0']],
      -   *                [['a1', 'b1'], ['c1', 'd1']]]
      -   *      output = [[[['a1', 'b1'], ['c1', 'd1']]],
      -   *                [[['a0', 'b0'], ['c0', 'd0']]]]
      -   *
      -   *      indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
      -   *      params = [[['a0', 'b0'], ['c0', 'd0']],
      -   *                [['a1', 'b1'], ['c1', 'd1']]]
      -   *      output = [[['c0', 'd0'], ['a1', 'b1']],
      -   *                [['a0', 'b0'], ['c1', 'd1']]]
      -   *
      -   *
      -   *      indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
      -   *      params = [[['a0', 'b0'], ['c0', 'd0']],
      -   *                [['a1', 'b1'], ['c1', 'd1']]]
      -   *      output = [['b0', 'b1'], ['d0', 'c1']]
      +   *  output =
      +   *    [
      +   *      [1.0, 0.0, 0.0]  // one_hot(0)
      +   *      [0.0, 0.0, 1.0]  // one_hot(2)
      +   *    ][
      +   *      [0.0, 1.0, 0.0]  // one_hot(1)
      +   *      [0.0, 0.0, 0.0]  // one_hot(-1)
      +   *    ]
          *  }
      - * See also `tf.gather` and `tf.batch_gather`. * - * @param data type for {@code output()} output - * @param params The tensor from which to gather values. - * @param indices Index tensor. - * @return a new instance of GatherNd + * @param data type for {@code output()} output + * @param indices A tensor of indices. + * @param depth A scalar defining the depth of the one hot dimension. + * @param onValue A scalar defining the value to fill in output when `indices[j] = i`. + * @param offValue A scalar defining the value to fill in output when `indices[j] != i`. + * @param options carries optional attributes values + * @return a new instance of OneHot */ - public GatherNd gatherNd(Operand params, - Operand indices) { - return GatherNd.create(scope, params, indices); + public OneHot oneHot(Operand indices, + Operand depth, Operand onValue, Operand offValue, OneHot.Options... options) { + return OneHot.create(scope, indices, depth, onValue, offValue, options); } /** - * Store the input tensor in the state of the current session. + * Returns a tensor of ones with the same shape and type as x. * - * @param value The tensor to be stored. - * @return a new instance of GetSessionHandle + * @param data type for {@code y()} output + * @param x a tensor of type T. + * @return a new instance of OnesLike */ - public GetSessionHandle getSessionHandle(Operand value) { - return GetSessionHandle.create(scope, value); + public OnesLike onesLike(Operand x) { + return OnesLike.create(scope, x); } /** - * Get the value of the tensor specified by its handle. + * Op removes all elements in the underlying container. * - * @param data type for {@code value()} output - * @param handle The handle for a tensor stored in the session state. - * @param dtype The type of the output value. 
- * @return a new instance of GetSessionTensor + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapClear */ - public GetSessionTensor getSessionTensor(Operand handle, - DataType dtype) { - return GetSessionTensor.create(scope, handle, dtype); + public OrderedMapClear orderedMapClear(List> dtypes, + OrderedMapClear.Options... options) { + return OrderedMapClear.create(scope, dtypes, options); } /** - * Adds gradients computation ops to the graph according to scope. + * Op returns the number of incomplete elements in the underlying container. * - * @param scope current graph scope - * @param y outputs of the function to derive - * @param x inputs of the function for which partial derivatives are computed + * @param dtypes * @param options carries optional attributes values - * @return a new instance of {@code Gradients} - * @throws IllegalArgumentException if execution environment is not a graph + * @return a new instance of OrderedMapIncompleteSize */ - public Gradients gradients(Iterable> y, Iterable> x, - Gradients.Options... options) { - return Gradients.create(scope, y, x, options); + public OrderedMapIncompleteSize orderedMapIncompleteSize(List> dtypes, + OrderedMapIncompleteSize.Options... options) { + return OrderedMapIncompleteSize.create(scope, dtypes, options); } /** - * Adds operations to compute the partial derivatives of sum of {@code y}s w.r.t {@code x}s, - * i.e., {@code d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...} - *

      - * If {@code Options.dx()} values are set, they are as the initial symbolic partial derivatives of some loss - * function {@code L} w.r.t. {@code y}. {@code Options.dx()} must have the size of {@code y}. - *

      - * If {@code Options.dx()} is not set, the implementation will use dx of {@code OnesLike} for all - * shapes in {@code y}. - *

      - * The partial derivatives are returned in output {@code dy}, with the size of {@code x}. + * Op peeks at the values at the specified key. If the *

      - * Example of usage: - *

      {@code
      -   *  Gradients gradients = Gradients.create(scope, Arrays.asList(loss), Arrays.asList(w, b));
      -   *
      -   *  Constant alpha = ops.constant(1.0f, Float.class);
      -   *  ApplyGradientDescent.create(scope, w, alpha, gradients.dy(0));
      -   *  ApplyGradientDescent.create(scope, b, alpha, gradients.dy(1));
      -   *  }
      + * underlying container does not contain this key + * this op will block until it does. This Op is optimized for + * performance. * - * @param y output of the function to derive - * @param x inputs of the function for which partial derivatives are computed + * @param key + * @param indices + * @param dtypes * @param options carries optional attributes values - * @return a new instance of {@code Gradients} - * @throws IllegalArgumentException if execution environment is not a graph + * @return a new instance of OrderedMapPeek */ - public Gradients gradients(Operand y, Iterable> x, - Gradients.Options... options) { - return Gradients.create(scope, y, x, options); + public OrderedMapPeek orderedMapPeek(Operand key, Operand indices, + List> dtypes, OrderedMapPeek.Options... options) { + return OrderedMapPeek.create(scope, key, indices, dtypes, options); } /** - * Gives a guarantee to the TF runtime that the input tensor is a constant. - *

      - * The runtime is then free to make optimizations based on this. - *

      - * Only accepts value typed tensors as inputs and rejects resource variable handles - * as input. - *

      - * Returns the input tensor without modification. + * Op returns the number of elements in the underlying container. * - * @param data type for {@code output()} output - * @param input - * @return a new instance of GuaranteeConst + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapSize */ - public GuaranteeConst guaranteeConst(Operand input) { - return GuaranteeConst.create(scope, input); + public OrderedMapSize orderedMapSize(List> dtypes, + OrderedMapSize.Options... options) { + return OrderedMapSize.create(scope, dtypes, options); } /** - * Creates a non-initialized hash table. + * Stage (key, values) in the underlying container which behaves like a ordered *

      - * This op creates a hash table, specifying the type of its keys and values. - * Before using the table you will have to initialize it. After initialization the - * table will be immutable. + * associative container. Elements are ordered by key. * - * @param keyDtype Type of the table keys. - * @param valueDtype Type of the table values. + * @param key int64 + * @param indices + * @param values a list of tensors + * dtypes A list of data types that inserted values should adhere to. + * @param dtypes * @param options carries optional attributes values - * @return a new instance of HashTable + * @return a new instance of OrderedMapStage */ - public HashTable hashTable(DataType keyDtype, - DataType valueDtype, HashTable.Options... options) { - return HashTable.create(scope, keyDtype, valueDtype, options); + public OrderedMapStage orderedMapStage(Operand key, Operand indices, + Iterable> values, List> dtypes, OrderedMapStage.Options... options) { + return OrderedMapStage.create(scope, key, indices, values, dtypes, options); } /** - * Return histogram of values. + * Op removes and returns the values associated with the key *

      - * Given the tensor `values`, this operation returns a rank 1 histogram counting - * the number of entries in `values` that fall into every bin. The bins are - * equal width and determined by the arguments `value_range` and `nbins`. - *

      {@code
      -   *  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
      -   *  nbins = 5
      -   *  value_range = [0.0, 5.0]
      -   *  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
      -   *
      -   *  with tf.get_default_session() as sess:
      -   *    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
      -   *    variables.global_variables_initializer().run()
      -   *    sess.run(hist) => [2, 1, 1, 0, 2]
      -   *  }
      + * from the underlying container. If the underlying container + * does not contain this key, the op will block until it does. * - * @param data type for {@code out()} output - * @param values Numeric `Tensor`. - * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. - * values <= value_range[0] will be mapped to hist[0], - * values >= value_range[1] will be mapped to hist[-1]. - * @param nbins Scalar `int32 Tensor`. Number of histogram bins. - * @return a new instance of HistogramFixedWidth + * @param key + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapUnstage */ - public HistogramFixedWidth histogramFixedWidth(Operand values, - Operand valueRange, Operand nbins) { - return HistogramFixedWidth.create(scope, values, valueRange, nbins); + public OrderedMapUnstage orderedMapUnstage(Operand key, Operand indices, + List> dtypes, OrderedMapUnstage.Options... options) { + return OrderedMapUnstage.create(scope, key, indices, dtypes, options); } /** - * Return histogram of values. + * Op removes and returns the (key, value) element with the smallest *

      - * Given the tensor `values`, this operation returns a rank 1 histogram counting - * the number of entries in `values` that fall into every bin. The bins are - * equal width and determined by the arguments `value_range` and `nbins`. - *

      {@code
      -   *  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
      -   *  nbins = 5
      -   *  value_range = [0.0, 5.0]
      -   *  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
      -   *
      -   *  with tf.get_default_session() as sess:
      -   *    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
      -   *    variables.global_variables_initializer().run()
      -   *    sess.run(hist) => [2, 1, 1, 0, 2]
      -   *  }
      + * key from the underlying container. If the underlying container + * does not contain elements, the op will block until it does. * - * @param data type for {@code out()} output - * @param values Numeric `Tensor`. - * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. - * values <= value_range[0] will be mapped to hist[0], - * values >= value_range[1] will be mapped to hist[-1]. - * @param nbins Scalar `int32 Tensor`. Number of histogram bins. - * @param dtype - * @return a new instance of HistogramFixedWidth + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapUnstageNoKey */ - public HistogramFixedWidth histogramFixedWidth( - Operand values, Operand valueRange, Operand nbins, DataType dtype) { - return HistogramFixedWidth.create(scope, values, valueRange, nbins, dtype); + public OrderedMapUnstageNoKey orderedMapUnstageNoKey(Operand indices, + List> dtypes, OrderedMapUnstageNoKey.Options... options) { + return OrderedMapUnstageNoKey.create(scope, indices, dtypes, options); } /** - * Return a tensor with the same shape and contents as the input tensor or value. + * Pads a tensor. + *

      + * This operation pads `input` according to the `paddings` and `constant_values` + * you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is + * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + * how many padding values to add before the contents of `input` in that dimension, + * and `paddings[D, 1]` indicates how many padding values to add after the contents + * of `input` in that dimension. `constant_values` is a scalar tensor of the same + * type as `input` that indicates the value to use for padding `input`. + *

      + * The padded size of each dimension D of the output is: + *

      + * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + *

      + * For example: + *

      {@code
      +   *  # 't' is [[1, 1], [2, 2]]
      +   *  # 'paddings' is [[1, 1], [2, 2]]
      +   *  # 'constant_values' is 0
      +   *  # rank of 't' is 2
      +   *  pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
      +   *                        [0, 0, 1, 1, 0, 0]
      +   *                        [0, 0, 2, 2, 0, 0]
      +   *                        [0, 0, 0, 0, 0, 0]]
      +   *  }
      * * @param data type for {@code output()} output * @param input - * @return a new instance of Identity + * @param paddings + * @param constantValues + * @return a new instance of Pad */ - public Identity identity(Operand input) { - return Identity.create(scope, input); + public Pad pad(Operand input, Operand paddings, + Operand constantValues) { + return Pad.create(scope, input, paddings, constantValues); } /** - * Returns a list of tensors with the same shapes and contents as the input + * Concatenates a list of `N` tensors along the first dimension. *

      - * tensors. + * The input tensors are all required to have size 1 in the first dimension. *

      - * This op can be used to override the gradient for complicated functions. For - * example, suppose y = f(x) and we wish to apply a custom function g for backprop - * such that dx = g(dy). In Python, + * For example: *

      {@code
      -   *  with tf.get_default_graph().gradient_override_map(
      -   *      {'IdentityN': 'OverrideGradientWithG'}):
      -   *    y, _ = identity_n([f(x), x])
      -   *
      -   * @tf.RegisterGradient('OverrideGradientWithG') def ApplyG(op, dy, _):
      -   *    return [None, g(dy)]  # Do not backprop to f(x).
      +   *  # 'x' is [[1, 4]]
      +   *  # 'y' is [[2, 5]]
      +   *  # 'z' is [[3, 6]]
      +   *  parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
          *  }
      - * @param input - * @return a new instance of IdentityN - */ - public IdentityN identityN(Iterable> input) { - return IdentityN.create(scope, input); - } - - /** - * Returns immutable tensor from memory region. - *

      - * The current implementation memmaps the tensor from a file. + * The difference between concat and parallel_concat is that concat requires all + * of the inputs be computed before the operation will begin but doesn't require + * that the input shapes be known during graph construction. Parallel concat + * will copy pieces of the input into the output as they become available, in + * some situations this can provide a performance benefit. * - * @param data type for {@code tensor()} output - * @param dtype Type of the returned tensor. - * @param shape Shape of the returned tensor. - * @param memoryRegionName Name of readonly memory region used by the tensor, see - * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. - * @return a new instance of ImmutableConst + * @param data type for {@code output()} output + * @param values Tensors to be concatenated. All must have size 1 in the first dimension + * and same shape. + * @param shape the final shape of the result; should be equal to the shapes of any input + * but with the number of input values in the first dimension. + * @return a new instance of ParallelConcat */ - public ImmutableConst immutableConst(DataType dtype, Shape shape, - String memoryRegionName) { - return ImmutableConst.create(scope, dtype, shape, memoryRegionName); + public ParallelConcat parallelConcat(Iterable> values, + Shape shape) { + return ParallelConcat.create(scope, values, shape); } /** - * Table initializer that takes two tensors for keys and values respectively. + * Interleave the values from the `data` tensors into a single tensor. + *

      + * Builds a merged tensor such that + *

      {@code
      +   *      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
      +   *  }
      + * For example, if each `indices[m]` is scalar or vector, we have + *
      {@code
      +   *      # Scalar indices:
      +   *      merged[indices[m], ...] = data[m][...]
          *
      -   * @param tableHandle Handle to a table which will be initialized.
      -   * @param keys Keys of type Tkey.
      -   * @param values Values of type Tval.
      -   * @return a new instance of InitializeTable
      -   */
      -  public  InitializeTable initializeTable(Operand tableHandle,
      -      Operand keys, Operand values) {
      -    return InitializeTable.create(scope, tableHandle, keys, values);
      -  }
      -
      -  /**
      -   * Initializes a table from a text file.
      +   *      # Vector indices:
      +   *      merged[indices[m][i], ...] = data[m][i, ...]
      +   *  }
      + * Each `data[i].shape` must start with the corresponding `indices[i].shape`, + * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we + * must have `data[i].shape = indices[i].shape + constant`. In terms of this + * `constant`, the output shape is *

      - * It inserts one key-value pair into the table for each line of the file. - * The key and value is extracted from the whole line content, elements from the - * split line based on `delimiter` or the line number (starting from zero). - * Where to extract the key and value from a line is specified by `key_index` and - * `value_index`. + * merged.shape = [max(indices)] + constant *

      - * - A value of -1 means use the line number(starting from zero), expects `int64`. - * - A value of -2 means use the whole line content, expects `string`. - * - A value >= 0 means use the index (starting at zero) of the split line based - * on `delimiter`. - * - * @param tableHandle Handle to a table which will be initialized. - * @param filename Filename of a vocabulary text file. - * @param keyIndex Column index in a line to get the table `key` values from. - * @param valueIndex Column index that represents information of a line to get the table - * `value` values from. - * @param options carries optional attributes values - * @return a new instance of InitializeTableFromTextFile + * Values may be merged in parallel, so if an index appears in both `indices[m][i]` + * and `indices[n][j]`, the result may be invalid. This differs from the normal + * DynamicStitch operator that defines the behavior in that case. + *

      + * For example: + *

      {@code
      +   *      indices[0] = 6
      +   *      indices[1] = [4, 1]
      +   *      indices[2] = [[5, 2], [0, 3]]
      +   *      data[0] = [61, 62]
      +   *      data[1] = [[41, 42], [11, 12]]
      +   *      data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
      +   *      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
      +   *                [51, 52], [61, 62]]
      +   *  }
      + * This method can be used to merge partitions created by `dynamic_partition` + * as illustrated on the following example: + *
      {@code
      +   *      # Apply function (increments x_i) on elements for which a certain condition
      +   *      # apply (x_i != -1 in this example).
      +   *      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
      +   *      condition_mask=tf.not_equal(x,tf.constant(-1.))
      +   *      partitioned_data = tf.dynamic_partition(
      +   *          x, tf.cast(condition_mask, tf.int32) , 2)
      +   *      partitioned_data[1] = partitioned_data[1] + 1.0
      +   *      condition_indices = tf.dynamic_partition(
      +   *          tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
      +   *      x = tf.dynamic_stitch(condition_indices, partitioned_data)
      +   *      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
      +   *      # unchanged.
      +   *  }
      + *
      + * + *
      + * + * @param data type for {@code merged()} output + * @param indices + * @param data + * @return a new instance of ParallelDynamicStitch */ - public InitializeTableFromTextFile initializeTableFromTextFile(Operand tableHandle, - Operand filename, Long keyIndex, Long valueIndex, - InitializeTableFromTextFile.Options... options) { - return InitializeTableFromTextFile.create(scope, tableHandle, filename, keyIndex, valueIndex, options); + public ParallelDynamicStitch parallelDynamicStitch( + Iterable> indices, Iterable> data) { + return ParallelDynamicStitch.create(scope, indices, data); } /** - * Adds v into specified rows of x. + * A placeholder op for a value that will be fed into the computation. *

      - * Computes y = x; y[i, :] += v; return y. + * N.B. This operation will fail with an error if it is executed. It is + * intended as a way to represent a value that will always be fed, and to + * provide attrs that enable the fed value to be checked at runtime. * - * @param data type for {@code y()} output - * @param x A `Tensor` of type T. - * @param i A vector. Indices into the left-most dimension of `x`. - * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. - * @return a new instance of InplaceAdd + * @param data type for {@code output()} output + * @param dtype The type of elements in the tensor. + * @param options carries optional attributes values + * @return a new instance of Placeholder */ - public InplaceAdd inplaceAdd(Operand x, Operand i, Operand v) { - return InplaceAdd.create(scope, x, i, v); + public Placeholder placeholder(DataType dtype, + Placeholder.Options... options) { + return Placeholder.create(scope, dtype, options); } /** - * Subtracts `v` into specified rows of `x`. + * A placeholder op that passes through `input` when its output is not fed. + * + * @param data type for {@code output()} output + * @param input The default value to produce when `output` is not fed. + * @param shape The (possibly partial) shape of the tensor. + * @return a new instance of PlaceholderWithDefault + */ + public PlaceholderWithDefault placeholderWithDefault(Operand input, + Shape shape) { + return PlaceholderWithDefault.create(scope, input, shape); + } + + /** + * Prints a string scalar. *

      - * Computes y = x; y[i, :] -= v; return y. + * Prints a string scalar to the desired output_stream. * - * @param data type for {@code y()} output - * @param x A `Tensor` of type T. - * @param i A vector. Indices into the left-most dimension of `x`. - * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. - * @return a new instance of InplaceSub + * @param input The string scalar to print. + * @param options carries optional attributes values + * @return a new instance of Print */ - public InplaceSub inplaceSub(Operand x, Operand i, Operand v) { - return InplaceSub.create(scope, x, i, v); + public Print print(Operand input, Print.Options... options) { + return Print.create(scope, input, options); } /** - * Updates specified rows with values in `v`. + * Computes the product of elements across dimensions of a tensor. *

      - * Computes `x[i, :] = v; return x`. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param data type for {@code y()} output - * @param x A tensor of type `T`. - * @param i A vector. Indices into the left-most dimension of `x`. - * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. - * @return a new instance of InplaceUpdate + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of Prod */ - public InplaceUpdate inplaceUpdate(Operand x, Operand i, - Operand v) { - return InplaceUpdate.create(scope, x, i, v); + public Prod prod(Operand input, Operand axis, + Prod.Options... options) { + return Prod.create(scope, input, axis, options); } /** - * Checks whether a tensor has been initialized. + * Reshapes a quantized tensor as per the Reshape op. *

      - * Outputs boolean scalar indicating whether the tensor has been initialized. + * ``` * - * @param ref Should be from a `Variable` node. May be uninitialized. - * @return a new instance of IsVariableInitialized + * @param data type for {@code output()} output + * @param tensor + * @param shape Defines the shape of the output tensor. + * @param inputMin The minimum value of the input. + * @param inputMax The maximum value of the input. + * @return a new instance of QuantizedReshape */ - public IsVariableInitialized isVariableInitialized(Operand ref) { - return IsVariableInitialized.create(scope, ref); + public QuantizedReshape quantizedReshape( + Operand tensor, Operand shape, Operand inputMin, Operand inputMax) { + return QuantizedReshape.create(scope, tensor, shape, inputMin, inputMax); } /** - * Generates values in an interval. + * Creates a sequence of numbers. *

      - * A sequence of `num` evenly-spaced values are generated beginning at `start`. - * If `num > 1`, the values in the sequence increase by `stop - start / num - 1`, - * so that the last one is exactly `stop`. + * This operation creates a sequence of numbers that begins at `start` and + * extends by increments of `delta` up to but not including `limit`. *

      * For example: *

      {@code
      -   *  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
      +   *  # 'start' is 3
      +   *  # 'limit' is 18
      +   *  # 'delta' is 3
      +   *  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
          *  }
      * * @param data type for {@code output()} output - * @param start 0-D tensor. First entry in the range. - * @param stop 0-D tensor. Last entry in the range. - * @param num 0-D tensor. Number of values to generate. - * @return a new instance of LinSpace + * @param start 0-D (scalar). First entry in the sequence. + * @param limit 0-D (scalar). Upper limit of sequence, exclusive. + * @param delta 0-D (scalar). Optional. Default is 1. Number that increments `start`. + * @return a new instance of Range */ - public LinSpace linSpace(Operand start, - Operand stop, Operand num) { - return LinSpace.create(scope, start, stop, num); + public Range range(Operand start, Operand limit, Operand delta) { + return Range.create(scope, start, limit, delta); } /** - * Outputs all keys and values in the table. + * Returns the rank of a tensor. + *

      + * This operation returns an integer representing the rank of `input`. + *

      + * For example: + *

      {@code
      +   *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
      +   *  # shape of tensor 't' is [2, 2, 3]
      +   *  rank(t) ==> 3
      +   *  }
      + * Note: The rank of a tensor is not the same as the rank of a matrix. The rank + * of a tensor is the number of indices required to uniquely select each element + * of the tensor. Rank is also known as "order", "degree", or "ndims." * - * @param data type for {@code keys()} output - * @param data type for {@code values()} output - * @param tableHandle Handle to the table. - * @param Tkeys - * @param Tvalues - * @return a new instance of LookupTableExport + * @param input + * @return a new instance of Rank */ - public LookupTableExport lookupTableExport( - Operand tableHandle, DataType Tkeys, DataType Tvalues) { - return LookupTableExport.create(scope, tableHandle, Tkeys, Tvalues); + public Rank rank(Operand input) { + return Rank.create(scope, input); } /** - * Looks up keys in a table, outputs the corresponding values. + * Reads the value of a variable. *

      - * The tensor `keys` must of the same type as the keys of the table. - * The output `values` is of the type of the table values. + * The tensor returned by this operation is immutable. *

      - * The scalar `default_value` is the value output for keys not present in the - * table. It must also be of the same type as the table values. + * The value returned by this operation is guaranteed to be influenced by all the + * writes on which this operation depends directly or indirectly, and to not be + * influenced by any of the writes which depend directly or indirectly on this + * operation. * - * @param data type for {@code values()} output - * @param tableHandle Handle to the table. - * @param keys Any shape. Keys to look up. - * @param defaultValue - * @return a new instance of LookupTableFind + * @param data type for {@code value()} output + * @param resource handle to the resource in which to store the variable. + * @param dtype the dtype of the value. + * @return a new instance of ReadVariableOp */ - public LookupTableFind lookupTableFind( - Operand tableHandle, Operand keys, Operand defaultValue) { - return LookupTableFind.create(scope, tableHandle, keys, defaultValue); + public ReadVariableOp readVariableOp(Operand resource, + DataType dtype) { + return ReadVariableOp.create(scope, resource, dtype); } /** - * Replaces the contents of the table with the specified keys and values. + * Computes the "logical and" of elements across dimensions of a tensor. *

      - * The tensor `keys` must be of the same type as the keys of the table. - * The tensor `values` must be of the type of the table values. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param tableHandle Handle to the table. - * @param keys Any shape. Keys to look up. - * @param values Values to associate with keys. - * @return a new instance of LookupTableImport + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of ReduceAll */ - public LookupTableImport lookupTableImport( - Operand tableHandle, Operand keys, Operand values) { - return LookupTableImport.create(scope, tableHandle, keys, values); + public ReduceAll reduceAll(Operand input, Operand axis, + ReduceAll.Options... options) { + return ReduceAll.create(scope, input, axis, options); } /** - * Updates the table to associates keys with values. + * Computes the "logical or" of elements across dimensions of a tensor. *

      - * The tensor `keys` must be of the same type as the keys of the table. - * The tensor `values` must be of the type of the table values. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param tableHandle Handle to the table. - * @param keys Any shape. Keys to look up. - * @param values Values to associate with keys. - * @return a new instance of LookupTableInsert + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of ReduceAny */ - public LookupTableInsert lookupTableInsert( - Operand tableHandle, Operand keys, Operand values) { - return LookupTableInsert.create(scope, tableHandle, keys, values); - } - - /** - * Computes the number of elements in the given table. - * - * @param tableHandle Handle to the table. - * @return a new instance of LookupTableSize - */ - public LookupTableSize lookupTableSize(Operand tableHandle) { - return LookupTableSize.create(scope, tableHandle); + public ReduceAny reduceAny(Operand input, Operand axis, + ReduceAny.Options... options) { + return ReduceAny.create(scope, input, axis, options); } /** - * Forwards the input to the output. + * Computes the maximum of elements across dimensions of a tensor. *

      - * This operator represents the loop termination condition used by the - * "pivot" switches of a loop. - * - * @param input A boolean scalar, representing the branch predicate of the Switch op. - * @return a new instance of LoopCond - */ - public LoopCond loopCond(Operand input) { - return LoopCond.create(scope, input); - } - - /** - * Op removes all elements in the underlying container. - * - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of MapClear - */ - public MapClear mapClear(List> dtypes, MapClear.Options... options) { - return MapClear.create(scope, dtypes, options); - } - - /** - * Op returns the number of incomplete elements in the underlying container. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param dtypes + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. * @param options carries optional attributes values - * @return a new instance of MapIncompleteSize + * @return a new instance of ReduceMax */ - public MapIncompleteSize mapIncompleteSize(List> dtypes, - MapIncompleteSize.Options... options) { - return MapIncompleteSize.create(scope, dtypes, options); + public ReduceMax reduceMax(Operand input, + Operand axis, ReduceMax.Options... options) { + return ReduceMax.create(scope, input, axis, options); } /** - * Op peeks at the values at the specified key. If the + * Computes the minimum of elements across dimensions of a tensor. *

      - * underlying container does not contain this key - * this op will block until it does. - * - * @param key - * @param indices - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of MapPeek - */ - public MapPeek mapPeek(Operand key, Operand indices, List> dtypes, - MapPeek.Options... options) { - return MapPeek.create(scope, key, indices, dtypes, options); - } - - /** - * Op returns the number of elements in the underlying container. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param dtypes + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. * @param options carries optional attributes values - * @return a new instance of MapSize + * @return a new instance of ReduceMin */ - public MapSize mapSize(List> dtypes, MapSize.Options... options) { - return MapSize.create(scope, dtypes, options); + public ReduceMin reduceMin(Operand input, + Operand axis, ReduceMin.Options... options) { + return ReduceMin.create(scope, input, axis, options); } /** - * Stage (key, values) in the underlying container which behaves like a hashtable. + * Computes the product of elements across dimensions of a tensor. + *

      + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param key int64 - * @param indices - * @param values a list of tensors - * dtypes A list of data types that inserted values should adhere to. - * @param dtypes + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. * @param options carries optional attributes values - * @return a new instance of MapStage + * @return a new instance of ReduceProd */ - public MapStage mapStage(Operand key, Operand indices, - Iterable> values, List> dtypes, MapStage.Options... options) { - return MapStage.create(scope, key, indices, values, dtypes, options); + public ReduceProd reduceProd(Operand input, + Operand axis, ReduceProd.Options... options) { + return ReduceProd.create(scope, input, axis, options); } /** - * Op removes and returns the values associated with the key + * Computes the sum of elements across dimensions of a tensor. *

      - * from the underlying container. If the underlying container - * does not contain this key, the op will block until it does. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param key - * @param indices - * @param dtypes + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. * @param options carries optional attributes values - * @return a new instance of MapUnstage + * @return a new instance of ReduceSum */ - public MapUnstage mapUnstage(Operand key, Operand indices, - List> dtypes, MapUnstage.Options... options) { - return MapUnstage.create(scope, key, indices, dtypes, options); + public ReduceSum reduceSum(Operand input, + Operand axis, ReduceSum.Options... options) { + return ReduceSum.create(scope, input, axis, options); } /** - * Op removes and returns a random (key, value) - *

      - * from the underlying container. If the underlying container - * does not contain elements, the op will block until it does. + * Makes its input available to the next iteration. * - * @param indices - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of MapUnstageNoKey + * @param data type for {@code output()} output + * @param data The tensor to be made available to the next iteration. + * @return a new instance of RefNextIteration */ - public MapUnstageNoKey mapUnstageNoKey(Operand indices, List> dtypes, - MapUnstageNoKey.Options... options) { - return MapUnstageNoKey.create(scope, indices, dtypes, options); + public RefNextIteration refNextIteration(Operand data) { + return RefNextIteration.create(scope, data); } /** - * Computes the maximum of elements across dimensions of a tensor. - *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * Forwards the `index`th element of `inputs` to `output`. * * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values - * @return a new instance of Max + * @param index A scalar that determines the input that gets selected. + * @param inputs A list of ref tensors, one of which will be forwarded to `output`. + * @return a new instance of RefSelect */ - public Max max(Operand input, Operand axis, - Max.Options... options) { - return Max.create(scope, input, axis, options); + public RefSelect refSelect(Operand index, + Iterable> inputs) { + return RefSelect.create(scope, index, inputs); } /** - * Forwards the value of an available tensor from `inputs` to `output`. + * Forwards the ref tensor `data` to the output port determined by `pred`. *

      - * `Merge` waits for at least one of the tensors in `inputs` to become available. - * It is usually combined with `Switch` to implement branching. + * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, + * the data goes to `output_false`. *

      - * `Merge` forwards the first tensor to become available to `output`, and sets - * `value_index` to its index in `inputs`. + * See also `Switch` and `Merge`. * - * @param data type for {@code output()} output - * @param inputs The input tensors, exactly one of which will become available. - * @return a new instance of Merge + * @param data type for {@code outputFalse()} output + * @param data The ref tensor to be forwarded to the appropriate output. + * @param pred A scalar that specifies which output port will receive data. + * @return a new instance of RefSwitch */ - public Merge merge(Iterable> inputs) { - return Merge.create(scope, inputs); + public RefSwitch refSwitch(Operand data, Operand pred) { + return RefSwitch.create(scope, data, pred); } /** - * Computes the minimum of elements across dimensions of a tensor. + * Execute a sub graph on a remote processor. *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * The graph specifications(such as graph itself, input tensors and output names) + * are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo + * as serialized_remote_fused_graph_execute_info. + * The specifications will be passed to a dedicated registered + * remote fused graph executor. The executor will send the graph specifications + * to a remote processor and execute that graph. The execution results + * will be passed to consumer nodes as outputs of this node. * - * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values - * @return a new instance of Min + * @param inputs Arbitrary number of tensors with arbitrary data types + * @param Toutputs + * @param serializedRemoteFusedGraphExecuteInfo Serialized protocol buffer + * of RemoteFusedGraphExecuteInfo which contains graph specifications. + * @return a new instance of RemoteFusedGraphExecute */ - public Min min(Operand input, Operand axis, - Min.Options... options) { - return Min.create(scope, input, axis, options); + public RemoteFusedGraphExecute remoteFusedGraphExecute(Iterable> inputs, + List> Toutputs, String serializedRemoteFusedGraphExecuteInfo) { + return RemoteFusedGraphExecute.create(scope, inputs, Toutputs, serializedRemoteFusedGraphExecuteInfo); } /** - * Pads a tensor with mirrored values. + * Reshapes a tensor. *

      - * This operation pads a `input` with mirrored values according to the `paddings` - * you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is - * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates - * how many values to add before the contents of `input` in that dimension, and - * `paddings[D, 1]` indicates how many values to add after the contents of `input` - * in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater - * than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true - * (if false, respectively). + * Given `tensor`, this operation returns a tensor that has the same values + * as `tensor` with shape `shape`. *

      - * The padded size of each dimension D of the output is: + * If one component of 1-D tensor `shape` is the special value -1, the size of that + * dimension is computed so that the total size remains constant. In particular, a + * `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be + * unknown. *

      - * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + * The `shape` must be 1-D and the operation returns a tensor with shape + * `shape` filled with the values of `tensor`. In this case, the number of elements + * implied by `shape` must be the same as the number of elements in `tensor`. + *

      + * It is an error if `shape` is not 1-D. *

      * For example: *

      {@code
      -   *  # 't' is [[1, 2, 3], [4, 5, 6]].
      -   *  # 'paddings' is [[1, 1]], [2, 2]].
      -   *  # 'mode' is SYMMETRIC.
      -   *  # rank of 't' is 2.
      -   *  pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
      -   *                        [2, 1, 1, 2, 3, 3, 2]
      -   *                        [5, 4, 4, 5, 6, 6, 5]
      -   *                        [5, 4, 4, 5, 6, 6, 5]]
      +   *  # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
      +   *  # tensor 't' has shape [9]
      +   *  reshape(t, [3, 3]) ==> [[1, 2, 3],
      +   *                          [4, 5, 6],
      +   *                          [7, 8, 9]]
      +   *
      +   *  # tensor 't' is [[[1, 1], [2, 2]],
      +   *  #                [[3, 3], [4, 4]]]
      +   *  # tensor 't' has shape [2, 2, 2]
      +   *  reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
      +   *                          [3, 3, 4, 4]]
      +   *
      +   *  # tensor 't' is [[[1, 1, 1],
      +   *  #                 [2, 2, 2]],
      +   *  #                [[3, 3, 3],
      +   *  #                 [4, 4, 4]],
      +   *  #                [[5, 5, 5],
      +   *  #                 [6, 6, 6]]]
      +   *  # tensor 't' has shape [3, 2, 3]
      +   *  # pass '[-1]' to flatten 't'
      +   *  reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
      +   *
      +   *  # -1 can also be used to infer the shape
      +   *
      +   *  # -1 is inferred to be 9:
      +   *  reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
      +   *                           [4, 4, 4, 5, 5, 5, 6, 6, 6]]
      +   *  # -1 is inferred to be 2:
      +   *  reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
      +   *                           [4, 4, 4, 5, 5, 5, 6, 6, 6]]
      +   *  # -1 is inferred to be 3:
      +   *  reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
      +   *                                [2, 2, 2],
      +   *                                [3, 3, 3]],
      +   *                               [[4, 4, 4],
      +   *                                [5, 5, 5],
      +   *                                [6, 6, 6]]]
      +   *
      +   *  # tensor 't' is [7]
      +   *  # shape `[]` reshapes to a scalar
      +   *  reshape(t, []) ==> 7
          *  }
      * * @param data type for {@code output()} output - * @param input The input tensor to be padded. - * @param paddings A two-column matrix specifying the padding sizes. The number of - * rows must be the same as the rank of `input`. - * @param mode Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions - * do not include the borders, while in symmetric mode the padded regions - * do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` - * is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and - * it is `[1, 2, 3, 3, 2]` in symmetric mode. - * @return a new instance of MirrorPad + * @param tensor + * @param shape Defines the shape of the output tensor. + * @return a new instance of Reshape */ - public MirrorPad mirrorPad(Operand input, - Operand paddings, String mode) { - return MirrorPad.create(scope, input, paddings, mode); + public Reshape reshape(Operand tensor, + Operand shape) { + return Reshape.create(scope, tensor, shape); } /** - * Wraps an arbitrary MLIR computation expressed as a module with a main() function. - *

      - * This operation does not have an associated kernel and is not intended to be - * executed in a regular TensorFlow session. Instead it is intended to be used for - * testing or for special case where a user intends to pass custom MLIR computation - * through a TensorFlow graph with the intent of having custom tooling processing - * it downstream (when targeting a different environment, like TensorFlow lite for - * example). - * The MLIR module is expected to have a main() function that will be used as an - * entry point. The inputs to the operations will be passed as argument to the - * main() function and the returned values of the main function mapped to the - * outputs. - * Example usage: - *

      {@code
      -   *  import tensorflow as tf
      -   *  from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
      -   *
      -   *  mlir_module = '''python
      -   *  func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
      -   *     %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
      -   *     return %ret : tensor<10x10xf32>
      -   *  }
      -   *  '''
      -   *
      -   * @tf.function def foo(x, y):
      -   *    return = mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
      +   * Increments variable pointed to by 'resource' until it reaches 'limit'.
          *
      -   *  graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
      -   *  }
      - * @param inputs - * @param mlirModule - * @param Toutputs - * @return a new instance of MlirPassthroughOp + * @param data type for {@code output()} output + * @param resource Should be from a scalar `Variable` node. + * @param limit If incrementing ref would bring it above limit, instead generates an + * 'OutOfRange' error. + * @param T + * @return a new instance of ResourceCountUpTo */ - public MlirPassthroughOp mlirPassthroughOp(Iterable> inputs, String mlirModule, - List> Toutputs) { - return MlirPassthroughOp.create(scope, inputs, mlirModule, Toutputs); + public ResourceCountUpTo resourceCountUpTo(Operand resource, Long limit, + DataType T) { + return ResourceCountUpTo.create(scope, resource, limit, T); } /** - * Creates an empty hash table that uses tensors as the backing store. - *

      - * It uses "open addressing" with quadratic reprobing to resolve - * collisions. + * Gather slices from the variable pointed to by `resource` according to `indices`. *

      - * This op creates a mutable hash table, specifying the type of its keys and - * values. Each value must be a scalar. Data can be inserted into the table using - * the insert operations. It does not support the initialization operation. + * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: + *

      {@code
      +   *      # Scalar indices
      +   *      output[:, ..., :] = params[indices, :, ... :]
          *
      -   * @param emptyKey The key used to represent empty key buckets internally. Must not
      -   *  be used in insert or lookup operations.
      -   * @param deletedKey
      -   * @param valueDtype Type of the table values.
      +   *      # Vector indices
      +   *      output[i, :, ..., :] = params[indices[i], :, ... :]
      +   *
      +   *      # Higher rank indices
      +   *      output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
      +   *  }
      + * + * @param data type for {@code output()} output + * @param resource + * @param indices + * @param dtype * @param options carries optional attributes values - * @return a new instance of MutableDenseHashTable + * @return a new instance of ResourceGather */ - public MutableDenseHashTable mutableDenseHashTable( - Operand emptyKey, Operand deletedKey, DataType valueDtype, - MutableDenseHashTable.Options... options) { - return MutableDenseHashTable.create(scope, emptyKey, deletedKey, valueDtype, options); + public ResourceGather resourceGather(Operand resource, + Operand indices, DataType dtype, ResourceGather.Options... options) { + return ResourceGather.create(scope, resource, indices, dtype, options); } /** - * Creates an empty hash table. - *

      - * This op creates a mutable hash table, specifying the type of its keys and - * values. Each value must be a scalar. Data can be inserted into the table using - * the insert operations. It does not support the initialization operation. * - * @param keyDtype Type of the table keys. - * @param valueDtype Type of the table values. - * @param options carries optional attributes values - * @return a new instance of MutableHashTable + * @param data type for {@code output()} output + * @param resource + * @param indices + * @param dtype + * @return a new instance of ResourceGatherNd */ - public MutableHashTable mutableHashTable(DataType keyDtype, - DataType valueDtype, MutableHashTable.Options... options) { - return MutableHashTable.create(scope, keyDtype, valueDtype, options); + public ResourceGatherNd resourceGatherNd( + Operand resource, Operand indices, DataType dtype) { + return ResourceGatherNd.create(scope, resource, indices, dtype); } /** - * Creates an empty hash table. + * Adds sparse updates to the variable referenced by `resource`. *

      - * This op creates a mutable hash table, specifying the type of its keys and - * values. Each value must be a vector. Data can be inserted into the table using - * the insert operations. It does not support the initialization operation. + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] += updates[...] + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] += updates[i, ...] + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions add. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param keyDtype Type of the table keys. - * @param valueDtype Type of the table values. - * @param options carries optional attributes values - * @return a new instance of MutableHashTableOfTensors + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterAdd */ - public MutableHashTableOfTensors mutableHashTableOfTensors( - DataType keyDtype, DataType valueDtype, MutableHashTableOfTensors.Options... options) { - return MutableHashTableOfTensors.create(scope, keyDtype, valueDtype, options); + public ResourceScatterAdd resourceScatterAdd( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterAdd.create(scope, resource, indices, updates); } /** - * Creates a Mutex resource that can be locked by `MutexLock`. + * Divides sparse updates into the variable referenced by `resource`. + *

      + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] /= updates[...] + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] /= updates[i, ...] + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions multiply. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param options carries optional attributes values - * @return a new instance of Mutex + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterDiv */ - public Mutex mutex(Mutex.Options... options) { - return Mutex.create(scope, options); + public ResourceScatterDiv resourceScatterDiv( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterDiv.create(scope, resource, indices, updates); } /** - * Locks a mutex resource. The output is the lock. So long as the lock tensor + * Reduces sparse updates into the variable referenced by `resource` using the `max` operation. *

      - * is alive, any other request to use `MutexLock` with this mutex will wait. + * This operation computes *

      - * This is particularly useful for creating a critical section when used in - * conjunction with `MutexLockIdentity`: - *

      {@code
      -   *  mutex = mutex_v2(
      -   *    shared_name=handle_name, container=container, name=name)
      -   *
      -   *  def execute_in_critical_section(fn, *args, **kwargs):
      -   *    lock = gen_resource_variable_ops.mutex_lock(mutex)
      -   *
      -   *    with ops.control_dependencies([lock]):
      -   *      r = fn(*args, **kwargs)
      -   *
      -   *    with ops.control_dependencies(nest.flatten(r)):
      -   *      with ops.colocate_with(mutex):
      -   *        ensure_lock_exists = mutex_lock_identity(lock)
      -   *
      -   *      # Make sure that if any element of r is accessed, all of
      -   *      # them are executed together.
      -   *      r = nest.map_structure(tf.identity, r)
      -   *
      -   *    with ops.control_dependencies([ensure_lock_exists]):
      -   *      return nest.map_structure(tf.identity, r)
      -   *  }
      - * While `fn` is running in the critical section, no other functions which wish to - * use this critical section may run. + * # Scalar indices + * ref[indices, ...] = max(ref[indices, ...], updates[...]) *

      - * Often the use case is that two executions of the same graph, in parallel, - * wish to run `fn`; and we wish to ensure that only one of them executes - * at a time. This is especially important if `fn` modifies one or more - * variables at a time. + * # Vector indices (for each i) + * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) *

      - * It is also useful if two separate functions must share a resource, but we - * wish to ensure the usage is exclusive. + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions are combined. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param mutex The mutex resource to lock. - * @return a new instance of MutexLock + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterMax */ - public MutexLock mutexLock(Operand mutex) { - return MutexLock.create(scope, mutex); + public ResourceScatterMax resourceScatterMax( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterMax.create(scope, resource, indices, updates); } /** - * Makes its input available to the next iteration. + * Reduces sparse updates into the variable referenced by `resource` using the `min` operation. + *

      + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] = min(ref[indices, ...], updates[...]) + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions are combined. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param data type for {@code output()} output - * @param data The tensor to be made available to the next iteration. - * @return a new instance of NextIteration + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterMin */ - public NextIteration nextIteration(Operand data) { - return NextIteration.create(scope, data); + public ResourceScatterMin resourceScatterMin( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterMin.create(scope, resource, indices, updates); } /** - * Does nothing. Only useful as a placeholder for control edges. + * Multiplies sparse updates into the variable referenced by `resource`. + *

      + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] *= updates[...] + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] *= updates[i, ...] + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions multiply. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @return a new instance of NoOp + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterMul */ - public NoOp noOp() { - return NoOp.create(scope); + public ResourceScatterMul resourceScatterMul( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterMul.create(scope, resource, indices, updates); } /** - * Returns a one-hot tensor. + * Applies sparse addition to individual values or slices in a Variable. *

      - * The locations represented by indices in `indices` take value `on_value`, - * while all other locations take value `off_value`. + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. *

      - * If the input `indices` is rank `N`, the output will have rank `N+1`, - * The new axis is created at dimension `axis` (default: the new axis is - * appended at the end). + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. *

      - * If `indices` is a scalar the output shape will be a vector of length `depth`. + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. *

      - * If `indices` is a vector of length `features`, the output shape will be: + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: *

      {@code
      -   *    features x depth if axis == -1
      -   *    depth x features if axis == 0
      +   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
          *  }
      - * If `indices` is a matrix (batch) with shape `[batch, features]`, - * the output shape will be: + * For example, say we want to add 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that addition would look like this: *
      {@code
      -   *    batch x features x depth if axis == -1
      -   *    batch x depth x features if axis == 1
      -   *    depth x batch x features if axis == 0
      +   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
      +   *  indices = tf.constant([[4], [3], [1], [7]])
      +   *  updates = tf.constant([9, 10, 11, 12])
      +   *  add = tf.scatter_nd_add(ref, indices, updates)
      +   *  with tf.Session() as sess:
      +   *    print sess.run(add)
          *  }
      - * Examples - * ========= + * The resulting update to ref would look like this: *

      - * Suppose that - *

      {@code
      -   *    indices = [0, 2, -1, 1]
      -   *    depth = 3
      -   *    on_value = 5.0
      -   *    off_value = 0.0
      -   *    axis = -1
      -   *  }
      - * Then output is `[4 x 3]`: - *
      {@code
      -   *  output =
      -   *    [5.0 0.0 0.0]  // one_hot(0)
      -   *    [0.0 0.0 5.0]  // one_hot(2)
      -   *    [0.0 0.0 0.0]  // one_hot(-1)
      -   *    [0.0 5.0 0.0]  // one_hot(1)
      -   *  }
      - * Suppose that - *
      {@code
      -   *    indices = [0, 2, -1, 1]
      -   *    depth = 3
      -   *    on_value = 0.0
      -   *    off_value = 3.0
      -   *    axis = 0
      -   *  }
      - * Then output is `[3 x 4]`: - *
      {@code
      -   *  output =
      -   *    [0.0 3.0 3.0 3.0]
      -   *    [3.0 3.0 3.0 0.0]
      -   *    [3.0 3.0 3.0 3.0]
      -   *    [3.0 0.0 3.0 3.0]
      -   *  //  ^                one_hot(0)
      -   *  //      ^            one_hot(2)
      -   *  //          ^        one_hot(-1)
      -   *  //              ^    one_hot(1)
      -   *  }
      - * Suppose that + * [1, 13, 3, 14, 14, 6, 7, 20] + *

      + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of + * values to add to ref. + * @param options carries optional attributes values + * @return a new instance of ResourceScatterNdAdd + */ + public ResourceScatterNdAdd resourceScatterNdAdd( + Operand ref, Operand indices, Operand updates, + ResourceScatterNdAdd.Options... options) { + return ResourceScatterNdAdd.create(scope, ref, indices, updates, options); + } + + /** + * Applies sparse subtraction to individual values or slices in a Variable. + *

      + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + *

      + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + *

      + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + *

      + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: *

      {@code
      -   *    indices = [[0, 2], [1, -1]]
      -   *    depth = 3
      -   *    on_value = 1.0
      -   *    off_value = 0.0
      -   *    axis = -1
      +   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
          *  }
      - * Then output is `[2 x 2 x 3]`: + * For example, say we want to subtract 4 scattered elements from a rank-1 tensor + * with 8 elements. In Python, that subtraction would look like this: *
      {@code
      -   *  output =
      -   *    [
      -   *      [1.0, 0.0, 0.0]  // one_hot(0)
      -   *      [0.0, 0.0, 1.0]  // one_hot(2)
      -   *    ][
      -   *      [0.0, 1.0, 0.0]  // one_hot(1)
      -   *      [0.0, 0.0, 0.0]  // one_hot(-1)
      -   *    ]
      +   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
      +   *  indices = tf.constant([[4], [3], [1], [7]])
      +   *  updates = tf.constant([9, 10, 11, 12])
      +   *  sub = tf.scatter_nd_sub(ref, indices, updates)
      +   *  with tf.Session() as sess:
      +   *    print sess.run(sub)
          *  }
      + * The resulting update to ref would look like this: + *

      + * [1, -9, 3, -6, -4, 6, 7, -4] + *

      + * See `tf.scatter_nd` for more details about how to make updates to + * slices. * - * @param data type for {@code output()} output - * @param indices A tensor of indices. - * @param depth A scalar defining the depth of the one hot dimension. - * @param onValue A scalar defining the value to fill in output when `indices[j] = i`. - * @param offValue A scalar defining the value to fill in output when `indices[j] != i`. + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of + * values to add to ref. * @param options carries optional attributes values - * @return a new instance of OneHot + * @return a new instance of ResourceScatterNdSub */ - public OneHot oneHot(Operand indices, - Operand depth, Operand onValue, Operand offValue, OneHot.Options... options) { - return OneHot.create(scope, indices, depth, onValue, offValue, options); + public ResourceScatterNdSub resourceScatterNdSub( + Operand ref, Operand indices, Operand updates, + ResourceScatterNdSub.Options... options) { + return ResourceScatterNdSub.create(scope, ref, indices, updates, options); } /** - * Returns a tensor of ones with the same shape and type as x. - * - * @param data type for {@code y()} output - * @param x a tensor of type T. - * @return a new instance of OnesLike - */ - public OnesLike onesLike(Operand x) { - return OnesLike.create(scope, x); - } - - /** - * Op removes all elements in the underlying container. - * - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of OrderedMapClear - */ - public OrderedMapClear orderedMapClear(List> dtypes, - OrderedMapClear.Options... options) { - return OrderedMapClear.create(scope, dtypes, options); - } - - /** - * Op returns the number of incomplete elements in the underlying container. 
- * - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of OrderedMapIncompleteSize - */ - public OrderedMapIncompleteSize orderedMapIncompleteSize(List> dtypes, - OrderedMapIncompleteSize.Options... options) { - return OrderedMapIncompleteSize.create(scope, dtypes, options); - } - - /** - * Op peeks at the values at the specified key. If the + * Applies sparse `updates` to individual values or slices within a given *

      - * underlying container does not contain this key - * this op will block until it does. This Op is optimized for - * performance. - * - * @param key - * @param indices - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of OrderedMapPeek - */ - public OrderedMapPeek orderedMapPeek(Operand key, Operand indices, - List> dtypes, OrderedMapPeek.Options... options) { - return OrderedMapPeek.create(scope, key, indices, dtypes, options); - } - - /** - * Op returns the number of elements in the underlying container. + * variable according to `indices`. + *

      + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + *

      + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + *

      + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + *

      + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + *

      {@code
      +   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
      +   *  }
      + * For example, say we want to update 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that update would look like this: + *
      {@code
      +   *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      +   *      indices = tf.constant([[4], [3], [1] ,[7]])
      +   *      updates = tf.constant([9, 10, 11, 12])
      +   *      update = tf.scatter_nd_update(ref, indices, updates)
      +   *      with tf.Session() as sess:
      +   *        print sess.run(update)
      +   *  }
      + * The resulting update to ref would look like this: + *

      + * [1, 11, 3, 10, 9, 6, 7, 12] + *

      + * See `tf.scatter_nd` for more details about how to make updates to + * slices. * - * @param dtypes + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated + * values to add to ref. * @param options carries optional attributes values - * @return a new instance of OrderedMapSize + * @return a new instance of ResourceScatterNdUpdate */ - public OrderedMapSize orderedMapSize(List> dtypes, - OrderedMapSize.Options... options) { - return OrderedMapSize.create(scope, dtypes, options); + public ResourceScatterNdUpdate resourceScatterNdUpdate( + Operand ref, Operand indices, Operand updates, + ResourceScatterNdUpdate.Options... options) { + return ResourceScatterNdUpdate.create(scope, ref, indices, updates, options); } /** - * Stage (key, values) in the underlying container which behaves like a ordered + * Subtracts sparse updates from the variable referenced by `resource`. *

      - * associative container. Elements are ordered by key. + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] -= updates[...] + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] -= updates[i, ...] + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions add. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param key int64 - * @param indices - * @param values a list of tensors - * dtypes A list of data types that inserted values should adhere to. - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of OrderedMapStage + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterSub */ - public OrderedMapStage orderedMapStage(Operand key, Operand indices, - Iterable> values, List> dtypes, OrderedMapStage.Options... options) { - return OrderedMapStage.create(scope, key, indices, values, dtypes, options); + public ResourceScatterSub resourceScatterSub( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterSub.create(scope, resource, indices, updates); } /** - * Op removes and returns the values associated with the key + * Assigns sparse updates to the variable referenced by `resource`. *

      - * from the underlying container. If the underlying container - * does not contain this key, the op will block until it does. + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] = updates[...] + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] = updates[i, ...] + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] * - * @param key - * @param indices - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of OrderedMapUnstage + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterUpdate */ - public OrderedMapUnstage orderedMapUnstage(Operand key, Operand indices, - List> dtypes, OrderedMapUnstage.Options... options) { - return OrderedMapUnstage.create(scope, key, indices, dtypes, options); + public ResourceScatterUpdate resourceScatterUpdate( + Operand resource, Operand indices, Operand updates) { + return ResourceScatterUpdate.create(scope, resource, indices, updates); } /** - * Op removes and returns the (key, value) element with the smallest + * Assign `value` to the sliced l-value reference of `ref`. *

      - * key from the underlying container. If the underlying container - * does not contain elements, the op will block until it does. + * The values of `value` are assigned to the positions in the variable + * `ref` that are selected by the slice parameters. The slice parameters + * `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. + *

      + * NOTE this op currently does not support broadcasting and so `value`'s + * shape must be exactly the shape produced by the slice of `ref`. * - * @param indices - * @param dtypes + * @param ref + * @param begin + * @param end + * @param strides + * @param value * @param options carries optional attributes values - * @return a new instance of OrderedMapUnstageNoKey + * @return a new instance of ResourceStridedSliceAssign */ - public OrderedMapUnstageNoKey orderedMapUnstageNoKey(Operand indices, - List> dtypes, OrderedMapUnstageNoKey.Options... options) { - return OrderedMapUnstageNoKey.create(scope, indices, dtypes, options); + public ResourceStridedSliceAssign resourceStridedSliceAssign( + Operand ref, Operand begin, Operand end, Operand strides, Operand value, + ResourceStridedSliceAssign.Options... options) { + return ResourceStridedSliceAssign.create(scope, ref, begin, end, strides, value, options); } /** - * Pads a tensor. + * Reverses specific dimensions of a tensor. *

      - * This operation pads `input` according to the `paddings` and `constant_values` - * you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is - * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates - * how many padding values to add before the contents of `input` in that dimension, - * and `paddings[D, 1]` indicates how many padding values to add after the contents - * of `input` in that dimension. `constant_values` is a scalar tensor of the same - * type as `input` that indicates the value to use for padding `input`. + * NOTE `tf.reverse` has now changed behavior in preparation for 1.0. + * `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. *

      - * The padded size of each dimension D of the output is: + * Given a `tensor`, and a `int32` tensor `axis` representing the set of + * dimensions of `tensor` to reverse. This operation reverses each dimension + * `i` for which there exists `j` s.t. `axis[j] == i`. *

      - * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + * `tensor` can have up to 8 dimensions. The number of dimensions specified + * in `axis` may be 0 or more entries. If an index is specified more than + * once, a InvalidArgument error is raised. *

      * For example: *

      {@code
      -   *  # 't' is [[1, 1], [2, 2]]
      -   *  # 'paddings' is [[1, 1], [2, 2]]
      -   *  # 'constant_values' is 0
      -   *  # rank of 't' is 2
      -   *  pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
      -   *                        [0, 0, 1, 1, 0, 0]
      -   *                        [0, 0, 2, 2, 0, 0]
      -   *                        [0, 0, 0, 0, 0, 0]]
      +   *  # tensor 't' is [[[[ 0,  1,  2,  3],
      +   *  #                  [ 4,  5,  6,  7],
      +   *  #                  [ 8,  9, 10, 11]],
      +   *  #                 [[12, 13, 14, 15],
      +   *  #                  [16, 17, 18, 19],
      +   *  #                  [20, 21, 22, 23]]]]
      +   *  # tensor 't' shape is [1, 2, 3, 4]
      +   *
      +   *  # 'dims' is [3] or 'dims' is [-1]
      +   *  reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
      +   *                          [ 7,  6,  5,  4],
      +   *                          [ 11, 10, 9, 8]],
      +   *                         [[15, 14, 13, 12],
      +   *                          [19, 18, 17, 16],
      +   *                          [23, 22, 21, 20]]]]
      +   *
      +   *  # 'dims' is '[1]' (or 'dims' is '[-3]')
      +   *  reverse(t, dims) ==> [[[[12, 13, 14, 15],
      +   *                          [16, 17, 18, 19],
      +   *                          [20, 21, 22, 23]
      +   *                         [[ 0,  1,  2,  3],
      +   *                          [ 4,  5,  6,  7],
      +   *                          [ 8,  9, 10, 11]]]]
      +   *
      +   *  # 'dims' is '[2]' (or 'dims' is '[-2]')
      +   *  reverse(t, dims) ==> [[[[8, 9, 10, 11],
      +   *                          [4, 5, 6, 7],
      +   *                          [0, 1, 2, 3]]
      +   *                         [[20, 21, 22, 23],
      +   *                          [16, 17, 18, 19],
      +   *                          [12, 13, 14, 15]]]]
          *  }
      * * @param data type for {@code output()} output - * @param input - * @param paddings - * @param constantValues - * @return a new instance of Pad - */ - public Pad pad(Operand input, Operand paddings, - Operand constantValues) { - return Pad.create(scope, input, paddings, constantValues); + * @param tensor Up to 8-D. + * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range + * `[-rank(tensor), rank(tensor))`. + * @return a new instance of Reverse + */ + public Reverse reverse(Operand tensor, + Operand axis) { + return Reverse.create(scope, tensor, axis); } /** - * Concatenates a list of `N` tensors along the first dimension. + * Reverses variable length slices. *

      - * The input tensors are all required to have size 1 in the first dimension. + * This op first slices `input` along the dimension `batch_dim`, and for each + * slice `i`, reverses the first `seq_lengths[i]` elements along + * the dimension `seq_dim`. + *

      + * The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, + * and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. + *

      + * The output slice `i` along dimension `batch_dim` is then given by input + * slice `i`, with the first `seq_lengths[i]` slices along dimension + * `seq_dim` reversed. *

      * For example: *

      {@code
      -   *  # 'x' is [[1, 4]]
      -   *  # 'y' is [[2, 5]]
      -   *  # 'z' is [[3, 6]]
      -   *  parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
      +   *  # Given this:
      +   *  batch_dim = 0
      +   *  seq_dim = 1
      +   *  input.dims = (4, 8, ...)
      +   *  seq_lengths = [7, 2, 3, 5]
      +   *
      +   *  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
      +   *  output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
      +   *  output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
      +   *  output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
      +   *  output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
      +   *
      +   *  # while entries past seq_lens are copied through:
      +   *  output[0, 7:, :, ...] = input[0, 7:, :, ...]
      +   *  output[1, 2:, :, ...] = input[1, 2:, :, ...]
      +   *  output[2, 3:, :, ...] = input[2, 3:, :, ...]
      +   *  output[3, 2:, :, ...] = input[3, 2:, :, ...]
      +   *  }
      + * In contrast, if: + *
      {@code
      +   *  # Given this:
      +   *  batch_dim = 2
      +   *  seq_dim = 0
      +   *  input.dims = (8, ?, 4, ...)
      +   *  seq_lengths = [7, 2, 3, 5]
      +   *
      +   *  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
      +   *  output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
      +   *  output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
      +   *  output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
      +   *  output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
      +   *
      +   *  # while entries past seq_lens are copied through:
      +   *  output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
      +   *  output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
      +   *  output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
      +   *  output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
          *  }
      - * The difference between concat and parallel_concat is that concat requires all - * of the inputs be computed before the operation will begin but doesn't require - * that the input shapes be known during graph construction. Parallel concat - * will copy pieces of the input into the output as they become available, in - * some situations this can provide a performance benefit. * * @param data type for {@code output()} output - * @param values Tensors to be concatenated. All must have size 1 in the first dimension - * and same shape. - * @param shape the final shape of the result; should be equal to the shapes of any input - * but with the number of input values in the first dimension. - * @return a new instance of ParallelConcat + * @param input The input to reverse. + * @param seqLengths 1-D with length `input.dims(batch_dim)` and + * `max(seq_lengths) <= input.dims(seq_dim)` + * @param seqDim The dimension which is partially reversed. + * @param options carries optional attributes values + * @return a new instance of ReverseSequence */ - public ParallelConcat parallelConcat(Iterable> values, - Shape shape) { - return ParallelConcat.create(scope, values, shape); + public ReverseSequence reverseSequence(Operand input, + Operand seqLengths, Long seqDim, ReverseSequence.Options... options) { + return ReverseSequence.create(scope, input, seqLengths, seqDim, options); } /** - * Interleave the values from the `data` tensors into a single tensor. + * Rolls the elements of a tensor along an axis. *

      - * Builds a merged tensor such that - *

      {@code
      -   *      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
      -   *  }
      - * For example, if each `indices[m]` is scalar or vector, we have + * The elements are shifted positively (towards larger indices) by the offset of + * `shift` along the dimension of `axis`. Negative `shift` values will shift + * elements in the opposite direction. Elements that roll passed the last position + * will wrap around to the first and vice versa. Multiple shifts along multiple + * axes may be specified. + *

      + * For example: *

      {@code
      -   *      # Scalar indices:
      -   *      merged[indices[m], ...] = data[m][...]
      +   *  # 't' is [0, 1, 2, 3, 4]
      +   *  roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
          *
      -   *      # Vector indices:
      -   *      merged[indices[m][i], ...] = data[m][i, ...]
      +   *  # shifting along multiple dimensions
      +   *  # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
      +   *  roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
      +   *
      +   *  # shifting along the same axis multiple times
      +   *  # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
      +   *  roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
          *  }
      - * Each `data[i].shape` must start with the corresponding `indices[i].shape`, - * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we - * must have `data[i].shape = indices[i].shape + constant`. In terms of this - * `constant`, the output shape is + * + * @param data type for {@code output()} output + * @param input + * @param shift Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which + * elements are shifted positively (towards larger indices) along the dimension + * specified by `axis[i]`. Negative shifts will roll the elements in the opposite + * direction. + * @param axis Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the shift + * `shift[i]` should occur. If the same axis is referenced more than once, the + * total shift for that axis will be the sum of all the shifts that belong to that + * axis. + * @return a new instance of Roll + */ + public Roll roll(Operand input, + Operand shift, Operand axis) { + return Roll.create(scope, input, shift, axis); + } + + /** + * Perform batches of RPC requests. *

      - * merged.shape = [max(indices)] + constant + * This op asynchronously performs either a single RPC request, or a batch + * of requests. RPC requests are defined by three main parameters: *

      - * Values may be merged in parallel, so if an index appears in both `indices[m][i]` - * and `indices[n][j]`, the result may be invalid. This differs from the normal - * DynamicStitch operator that defines the behavior in that case. + * - `address` (the host+port or BNS address of the request) + * - `method` (the RPC method name for the request) + * - `request` (the serialized proto string, or vector of strings, + * of the RPC request argument). *

      - * For example: + * For example, if you have an RPC service running on port localhost:2345, + * and its interface is configured with the following proto declaration: *

      {@code
      -   *      indices[0] = 6
      -   *      indices[1] = [4, 1]
      -   *      indices[2] = [[5, 2], [0, 3]]
      -   *      data[0] = [61, 62]
      -   *      data[1] = [[41, 42], [11, 12]]
      -   *      data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
      -   *      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
      -   *                [51, 52], [61, 62]]
      +   *  service MyService {
      +   *    rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
      +   *    }
      +   *  };
          *  }
      - * This method can be used to merge partitions created by `dynamic_partition` - * as illustrated on the following example: + * then call this op with arguments: *
      {@code
      -   *      # Apply function (increments x_i) on elements for which a certain condition
      -   *      # apply (x_i != -1 in this example).
      -   *      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
      -   *      condition_mask=tf.not_equal(x,tf.constant(-1.))
      -   *      partitioned_data = tf.dynamic_partition(
      -   *          x, tf.cast(condition_mask, tf.int32) , 2)
      -   *      partitioned_data[1] = partitioned_data[1] + 1.0
      -   *      condition_indices = tf.dynamic_partition(
      -   *          tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
      -   *      x = tf.dynamic_stitch(condition_indices, partitioned_data)
      -   *      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
      -   *      # unchanged.
      +   *  address = "localhost:2345"
      +   *  method = "MyService/MyMethod"
          *  }
      - *
      - * - *
      - * - * @param data type for {@code merged()} output - * @param indices - * @param data - * @return a new instance of ParallelDynamicStitch - */ - public ParallelDynamicStitch parallelDynamicStitch( - Iterable> indices, Iterable> data) { - return ParallelDynamicStitch.create(scope, indices, data); - } - - /** - * A placeholder op for a value that will be fed into the computation. + * The `request` tensor is a string tensor representing serialized `MyRequestProto` + * strings; and the output string tensor `response` will have the same shape + * and contain (upon successful completion) corresponding serialized + * `MyResponseProto` strings. *

      - * N.B. This operation will fail with an error if it is executed. It is - * intended as a way to represent a value that will always be fed, and to - * provide attrs that enable the fed value to be checked at runtime. + * For example, to send a single, empty, `MyRequestProto`, call + * this op with `request = ""`. To send 5 parallel empty requests, + * call this op with `request = ["", "", "", "", ""]`. + *

      + * More generally, one can create a batch of `MyRequestProto` serialized protos + * from regular batched tensors using the `encode_proto` op, and convert + * the response `MyResponseProto` serialized protos to batched tensors + * using the `decode_proto` op. + *

      + * NOTE Working with serialized proto strings is faster than instantiating + * actual proto objects in memory, so no performance degradation is expected + * compared to writing custom kernels for this workflow. + *

      + * If the connection fails or the remote worker returns an error + * status, the op reraises this exception locally. + *

      + * See the `TryRpc` op if you prefer to handle RPC failures manually in the graph. * - * @param data type for {@code output()} output - * @param dtype The type of elements in the tensor. + * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `method` and `request`. + * @param method `0-D` or `1-D`. The method address on the RPC server. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `address` and `request`. + * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `address` and `method`. * @param options carries optional attributes values - * @return a new instance of Placeholder - */ - public Placeholder placeholder(DataType dtype, - Placeholder.Options... options) { - return Placeholder.create(scope, dtype, options); - } - - /** - * A placeholder op that passes through `input` when its output is not fed. - * - * @param data type for {@code output()} output - * @param input The default value to produce when `output` is not fed. - * @param shape The (possibly partial) shape of the tensor. - * @return a new instance of PlaceholderWithDefault + * @return a new instance of Rpc */ - public PlaceholderWithDefault placeholderWithDefault(Operand input, - Shape shape) { - return PlaceholderWithDefault.create(scope, input, shape); + public Rpc rpc(Operand address, Operand method, Operand request, + Rpc.Options... options) { + return Rpc.create(scope, address, method, request, options); } /** - * Prints a string scalar. + * Adds sparse updates to a variable reference. *

      - * Prints a string scalar to the desired output_stream. - * - * @param input The string scalar to print. - * @param options carries optional attributes values - * @return a new instance of Print - */ - public Print print(Operand input, Print.Options... options) { - return Print.create(scope, input, options); - } - - /** - * Computes the product of elements across dimensions of a tensor. + * This operation computes *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * # Scalar indices + * ref[indices, ...] += updates[...] + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] += updates[i, ...] + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + *

      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions add. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. * @param options carries optional attributes values - * @return a new instance of Prod + * @return a new instance of ScatterAdd */ - public Prod prod(Operand input, Operand axis, - Prod.Options... options) { - return Prod.create(scope, input, axis, options); + public ScatterAdd scatterAdd(Operand ref, + Operand indices, Operand updates, ScatterAdd.Options... options) { + return ScatterAdd.create(scope, ref, indices, updates, options); } /** - * Reshapes a quantized tensor as per the Reshape op. + * Divides a variable reference by sparse updates. *

      - * ``` + * This operation computes + *

      {@code
      +   *      # Scalar indices
      +   *      ref[indices, ...] /= updates[...]
          *
      -   * @param  data type for {@code output()} output
      -   * @param tensor
      -   * @param shape Defines the shape of the output tensor.
      -   * @param inputMin The minimum value of the input.
      -   * @param inputMax The maximum value of the input.
      -   * @return a new instance of QuantizedReshape
      -   */
      -  public  QuantizedReshape quantizedReshape(
      -      Operand tensor, Operand shape, Operand inputMin, Operand inputMax) {
      -    return QuantizedReshape.create(scope, tensor, shape, inputMin, inputMax);
      -  }
      -
      -  /**
      -   * Creates a sequence of numbers.
      +   *      # Vector indices (for each i)
      +   *      ref[indices[i], ...] /= updates[i, ...]
      +   *
      +   *      # High rank indices (for each i, ..., j)
      +   *      ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
      +   *  }
      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. *

      - * This operation creates a sequence of numbers that begins at `start` and - * extends by increments of `delta` up to but not including `limit`. + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions divide. *

      - * For example: - *

      {@code
      -   *  # 'start' is 3
      -   *  # 'limit' is 18
      -   *  # 'delta' is 3
      -   *  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
      -   *  }
      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. * - * @param data type for {@code output()} output - * @param start 0-D (scalar). First entry in the sequence. - * @param limit 0-D (scalar). Upper limit of sequence, exclusive. - * @param delta 0-D (scalar). Optional. Default is 1. Number that increments `start`. - * @return a new instance of Range + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of values that `ref` is divided by. + * @param options carries optional attributes values + * @return a new instance of ScatterDiv */ - public Range range(Operand start, Operand limit, Operand delta) { - return Range.create(scope, start, limit, delta); + public ScatterDiv scatterDiv(Operand ref, + Operand indices, Operand updates, ScatterDiv.Options... options) { + return ScatterDiv.create(scope, ref, indices, updates, options); } /** - * Returns the rank of a tensor. + * Reduces sparse updates into a variable reference using the `max` operation. *

      - * This operation returns an integer representing the rank of `input`. + * This operation computes *

      - * For example: - *

      {@code
      -   *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
      -   *  # shape of tensor 't' is [2, 2, 3]
      -   *  rank(t) ==> 3
      -   *  }
      - * Note: The rank of a tensor is not the same as the rank of a matrix. The rank - * of a tensor is the number of indices required to uniquely select each element - * of the tensor. Rank is also known as "order", "degree", or "ndims." - * - * @param input - * @return a new instance of Rank - */ - public Rank rank(Operand input) { - return Rank.create(scope, input); - } - - /** - * Reads the value of a variable. + * # Scalar indices + * ref[indices, ...] = max(ref[indices, ...], updates[...]) *

      - * The tensor returned by this operation is immutable. + * # Vector indices (for each i) + * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) *

      - * The value returned by this operation is guaranteed to be influenced by all the - * writes on which this operation depends directly or indirectly, and to not be - * influenced by any of the writes which depend directly or indirectly on this - * operation. + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + *

      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions combine. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param data type for {@code value()} output - * @param resource handle to the resource in which to store the variable. - * @param dtype the dtype of the value. - * @return a new instance of ReadVariableOp + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to reduce into `ref`. + * @param options carries optional attributes values + * @return a new instance of ScatterMax */ - public ReadVariableOp readVariableOp(Operand resource, - DataType dtype) { - return ReadVariableOp.create(scope, resource, dtype); + public ScatterMax scatterMax(Operand ref, + Operand indices, Operand updates, ScatterMax.Options... options) { + return ScatterMax.create(scope, ref, indices, updates, options); } /** - * Computes the "logical and" of elements across dimensions of a tensor. + * Reduces sparse updates into a variable reference using the `min` operation. *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * This operation computes + *

      + * # Scalar indices + * ref[indices, ...] = min(ref[indices, ...], updates[...]) + *

      + * # Vector indices (for each i) + * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + *

      + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + *

      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions combine. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to reduce into `ref`. * @param options carries optional attributes values - * @return a new instance of ReduceAll + * @return a new instance of ScatterMin */ - public ReduceAll reduceAll(Operand input, Operand axis, - ReduceAll.Options... options) { - return ReduceAll.create(scope, input, axis, options); + public ScatterMin scatterMin(Operand ref, + Operand indices, Operand updates, ScatterMin.Options... options) { + return ScatterMin.create(scope, ref, indices, updates, options); } /** - * Computes the "logical or" of elements across dimensions of a tensor. + * Multiplies sparse updates into a variable reference. *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * This operation computes + *

      {@code
      +   *      # Scalar indices
      +   *      ref[indices, ...] *= updates[...]
          *
      -   * @param input The tensor to reduce.
      -   * @param axis The dimensions to reduce. Must be in the range
      -   *  `[-rank(input), rank(input))`.
      +   *      # Vector indices (for each i)
      +   *      ref[indices[i], ...] *= updates[i, ...]
      +   *
      +   *      # High rank indices (for each i, ..., j)
      +   *      ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
      +   *  }
      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions multiply. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to multiply to `ref`. * @param options carries optional attributes values - * @return a new instance of ReduceAny + * @return a new instance of ScatterMul */ - public ReduceAny reduceAny(Operand input, Operand axis, - ReduceAny.Options... options) { - return ReduceAny.create(scope, input, axis, options); + public ScatterMul scatterMul(Operand ref, + Operand indices, Operand updates, ScatterMul.Options... options) { + return ScatterMul.create(scope, ref, indices, updates, options); } /** - * Computes the maximum of elements across dimensions of a tensor. + * Scatter `updates` into a new tensor according to `indices`. *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * Creates a new tensor by applying sparse `updates` to individual values or + * slices within a tensor (initially zero for numeric, empty for string) of + * the given `shape` according to indices. This operator is the inverse of the + * `tf.gather_nd` operator which extracts values or slices from a given tensor. + *

      + * This operation is similar to tensor_scatter_add, except that the tensor is + * zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical + * to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` + *

      + * If `indices` contains duplicates, then their updates are accumulated (summed). + *

      + * WARNING: The order in which updates are applied is nondeterministic, so the + * output will be nondeterministic if `indices` contains duplicates -- because + * of some numerical approximation issues, numbers summed in different order + * may yield different results. + *

      + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + *

      + * indices.shape[-1] <= shape.rank + *

      + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape + *

      + * indices.shape[:-1] + shape[indices.shape[-1]:] + *

      + * The simplest form of scatter is to insert individual elements in a tensor by + * index. For example, say we want to insert 4 scattered elements in a rank-1 + * tensor with 8 elements. + *

      + *

      + * + *
      + *

      + * In Python, this scatter operation would look like this: + *

      {@code
      +   *      indices = tf.constant([[4], [3], [1], [7]])
      +   *      updates = tf.constant([9, 10, 11, 12])
      +   *      shape = tf.constant([8])
      +   *      scatter = tf.scatter_nd(indices, updates, shape)
      +   *      print(scatter)
      +   *  }
      + * The resulting tensor would look like this: + *

      + * [0, 11, 0, 10, 9, 0, 0, 12] + *

      + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. + *

      + *

      + * + *
      + *

      + * In Python, this scatter operation would look like this: + *

      {@code
      +   *      indices = tf.constant([[0], [2]])
      +   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
      +   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
      +   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
      +   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
      +   *      shape = tf.constant([4, 4, 4])
      +   *      scatter = tf.scatter_nd(indices, updates, shape)
      +   *      print(scatter)
      +   *  }
      + * The resulting tensor would look like this: + *

      + * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] + *

      + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. * - * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values - * @return a new instance of ReduceMax + * @param data type for {@code output()} output + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @param shape 1-D. The shape of the resulting tensor. + * @return a new instance of ScatterNd */ - public ReduceMax reduceMax(Operand input, - Operand axis, ReduceMax.Options... options) { - return ReduceMax.create(scope, input, axis, options); + public ScatterNd scatterNd(Operand indices, + Operand updates, Operand shape) { + return ScatterNd.create(scope, indices, updates, shape); } /** - * Computes the minimum of elements across dimensions of a tensor. + * Applies sparse addition to individual values or slices in a Variable. *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + *

      + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + *

      + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + *

      + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + *

      {@code
      +   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
      +   *  }
      + * For example, say we want to add 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that addition would look like this: + *
      {@code
      +   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      +   *  indices = tf.constant([[4], [3], [1], [7]])
      +   *  updates = tf.constant([9, 10, 11, 12])
      +   *  add = tf.scatter_nd_add(ref, indices, updates)
      +   *  with tf.Session() as sess:
      +   *    print sess.run(add)
      +   *  }
      + * The resulting update to ref would look like this: + *

      + * [1, 13, 3, 14, 14, 6, 7, 20] + *

      + * See `tf.scatter_nd` for more details about how to make updates to + * slices. * - * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. + * @param data type for {@code outputRef()} output + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to add to ref. * @param options carries optional attributes values - * @return a new instance of ReduceMin + * @return a new instance of ScatterNdAdd */ - public ReduceMin reduceMin(Operand input, - Operand axis, ReduceMin.Options... options) { - return ReduceMin.create(scope, input, axis, options); + public ScatterNdAdd scatterNdAdd(Operand ref, + Operand indices, Operand updates, ScatterNdAdd.Options... options) { + return ScatterNdAdd.create(scope, ref, indices, updates, options); } /** - * Computes the product of elements across dimensions of a tensor. + * Applies sparse addition to `input` using individual values or slices *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * from `updates` according to indices `indices`. The updates are non-aliasing: + * `input` is only modified in-place if no other operations will use it. + * Otherwise, a copy of `input` is made. This operation has a gradient with + * respect to both `input` and `updates`. + *

      + * `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + *

      + * `indices` must be integer tensor, containing indices into `input`. + * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. + *

      + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or `(P-K)`-dimensional slices + * (if `K < P`) along the `K`th dimension of `input`. + *

      + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + *

      + * $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ + *

      + * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 + * elements. In Python, that addition would look like this: + *

      + * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) + * with tf.Session() as sess: + * print(sess.run(output)) + *

      + * The resulting value `output` would look like this: + *

      + * [1, 13, 3, 14, 14, 6, 7, 20] + *

      + * See `tf.scatter_nd` for more details about how to make updates to slices. * * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values - * @return a new instance of ReduceProd + * @param input A Tensor. + * @param indices A Tensor. Must be one of the following types: `int32`, `int64`. + * A tensor of indices into `input`. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to add to `input`. + * @return a new instance of ScatterNdNonAliasingAdd */ - public ReduceProd reduceProd(Operand input, - Operand axis, ReduceProd.Options... options) { - return ReduceProd.create(scope, input, axis, options); + public ScatterNdNonAliasingAdd scatterNdNonAliasingAdd( + Operand input, Operand indices, Operand updates) { + return ScatterNdNonAliasingAdd.create(scope, input, indices, updates); } /** - * Computes the sum of elements across dimensions of a tensor. + * Applies sparse subtraction to individual values or slices in a Variable. *

      - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * within a given variable according to `indices`. + *

      + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + *

      + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + *

      + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + *

      + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + *

      {@code
      +   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
      +   *  }
      + * For example, say we want to subtract 4 scattered elements from a rank-1 tensor + * with 8 elements. In Python, that subtraction would look like this: + *
      {@code
      +   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      +   *  indices = tf.constant([[4], [3], [1], [7]])
      +   *  updates = tf.constant([9, 10, 11, 12])
      +   *  sub = tf.scatter_nd_sub(ref, indices, updates)
      +   *  with tf.Session() as sess:
      +   *    print sess.run(sub)
      +   *  }
      + * The resulting update to ref would look like this: + *

      + * [1, -9, 3, -6, -4, 6, 7, -4] + *

      + * See `tf.scatter_nd` for more details about how to make updates to + * slices. * - * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. + * @param data type for {@code outputRef()} output + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to subtract from ref. * @param options carries optional attributes values - * @return a new instance of ReduceSum + * @return a new instance of ScatterNdSub */ - public ReduceSum reduceSum(Operand input, - Operand axis, ReduceSum.Options... options) { - return ReduceSum.create(scope, input, axis, options); - } - - /** - * Makes its input available to the next iteration. - * - * @param data type for {@code output()} output - * @param data The tensor to be made available to the next iteration. - * @return a new instance of RefNextIteration - */ - public RefNextIteration refNextIteration(Operand data) { - return RefNextIteration.create(scope, data); - } - - /** - * Forwards the `index`th element of `inputs` to `output`. - * - * @param data type for {@code output()} output - * @param index A scalar that determines the input that gets selected. - * @param inputs A list of ref tensors, one of which will be forwarded to `output`. - * @return a new instance of RefSelect - */ - public RefSelect refSelect(Operand index, - Iterable> inputs) { - return RefSelect.create(scope, index, inputs); + public ScatterNdSub scatterNdSub(Operand ref, + Operand indices, Operand updates, ScatterNdSub.Options... options) { + return ScatterNdSub.create(scope, ref, indices, updates, options); } /** - * Forwards the ref tensor `data` to the output port determined by `pred`. - *

      - * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, - * the data goes to `output_false`. + * Applies sparse `updates` to individual values or slices within a given *

      - * See also `Switch` and `Merge`. - * - * @param data type for {@code outputFalse()} output - * @param data The ref tensor to be forwarded to the appropriate output. - * @param pred A scalar that specifies which output port will receive data. - * @return a new instance of RefSwitch - */ - public RefSwitch refSwitch(Operand data, Operand pred) { - return RefSwitch.create(scope, data, pred); - } - - /** - * Execute a sub graph on a remote processor. + * variable according to `indices`. *

      - * The graph specifications(such as graph itself, input tensors and output names) - * are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo - * as serialized_remote_fused_graph_execute_info. - * The specifications will be passed to a dedicated registered - * remote fused graph executor. The executor will send the graph specifications - * to a remote processor and execute that graph. The execution results - * will be passed to consumer nodes as outputs of this node. - * - * @param inputs Arbitrary number of tensors with arbitrary data types - * @param Toutputs - * @param serializedRemoteFusedGraphExecuteInfo Serialized protocol buffer - * of RemoteFusedGraphExecuteInfo which contains graph specifications. - * @return a new instance of RemoteFusedGraphExecute - */ - public RemoteFusedGraphExecute remoteFusedGraphExecute(Iterable> inputs, - List> Toutputs, String serializedRemoteFusedGraphExecuteInfo) { - return RemoteFusedGraphExecute.create(scope, inputs, Toutputs, serializedRemoteFusedGraphExecuteInfo); - } - - /** - * Reshapes a tensor. + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. *

      - * Given `tensor`, this operation returns a tensor that has the same values - * as `tensor` with shape `shape`. + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. *

      - * If one component of 1-D tensor `shape` is the special value -1, the size of that - * dimension is computed so that the total size remains constant. In particular, a - * `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be - * unknown. + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. *

      - * The `shape` must be 1-D and the operation returns a tensor with shape - * `shape` filled with the values of `tensor`. In this case, the number of elements - * implied by `shape` must be the same as the number of elements in `tensor`. + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: *

      - * It is an error if `shape` is not 1-D. + * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ *

      - * For example: + * For example, say we want to update 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that update would look like this: *

      {@code
      -   *  # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
      -   *  # tensor 't' has shape [9]
      -   *  reshape(t, [3, 3]) ==> [[1, 2, 3],
      -   *                          [4, 5, 6],
      -   *                          [7, 8, 9]]
      -   *
      -   *  # tensor 't' is [[[1, 1], [2, 2]],
      -   *  #                [[3, 3], [4, 4]]]
      -   *  # tensor 't' has shape [2, 2, 2]
      -   *  reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
      -   *                          [3, 3, 4, 4]]
      -   *
      -   *  # tensor 't' is [[[1, 1, 1],
      -   *  #                 [2, 2, 2]],
      -   *  #                [[3, 3, 3],
      -   *  #                 [4, 4, 4]],
      -   *  #                [[5, 5, 5],
      -   *  #                 [6, 6, 6]]]
      -   *  # tensor 't' has shape [3, 2, 3]
      -   *  # pass '[-1]' to flatten 't'
      -   *  reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
      -   *
      -   *  # -1 can also be used to infer the shape
      -   *
      -   *  # -1 is inferred to be 9:
      -   *  reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
      -   *                           [4, 4, 4, 5, 5, 5, 6, 6, 6]]
      -   *  # -1 is inferred to be 2:
      -   *  reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
      -   *                           [4, 4, 4, 5, 5, 5, 6, 6, 6]]
      -   *  # -1 is inferred to be 3:
      -   *  reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
      -   *                                [2, 2, 2],
      -   *                                [3, 3, 3]],
      -   *                               [[4, 4, 4],
      -   *                                [5, 5, 5],
      -   *                                [6, 6, 6]]]
      -   *
      -   *  # tensor 't' is [7]
      -   *  # shape `[]` reshapes to a scalar
      -   *  reshape(t, []) ==> 7
      +   *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      +   *      indices = tf.constant([[4], [3], [1] ,[7]])
      +   *      updates = tf.constant([9, 10, 11, 12])
      +   *      update = tf.scatter_nd_update(ref, indices, updates)
      +   *      with tf.Session() as sess:
      +   *        print sess.run(update)
          *  }
      + * The resulting update to ref would look like this: + *

      + * [1, 11, 3, 10, 9, 6, 7, 12] + *

      + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + *

      + * See also `tf.scatter_update` and `tf.batch_scatter_update`. * - * @param data type for {@code output()} output - * @param tensor - * @param shape Defines the shape of the output tensor. - * @return a new instance of Reshape - */ - public Reshape reshape(Operand tensor, - Operand shape) { - return Reshape.create(scope, tensor, shape); - } - - /** - * Increments variable pointed to by 'resource' until it reaches 'limit'. - * - * @param data type for {@code output()} output - * @param resource Should be from a scalar `Variable` node. - * @param limit If incrementing ref would bring it above limit, instead generates an - * 'OutOfRange' error. - * @param T - * @return a new instance of ResourceCountUpTo + * @param data type for {@code outputRef()} output + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated + * values to add to ref. + * @param options carries optional attributes values + * @return a new instance of ScatterNdUpdate */ - public ResourceCountUpTo resourceCountUpTo(Operand resource, Long limit, - DataType T) { - return ResourceCountUpTo.create(scope, resource, limit, T); + public ScatterNdUpdate scatterNdUpdate(Operand ref, + Operand indices, Operand updates, ScatterNdUpdate.Options... options) { + return ScatterNdUpdate.create(scope, ref, indices, updates, options); } /** - * Gather slices from the variable pointed to by `resource` according to `indices`. + * Subtracts sparse updates to a variable reference. *

      - * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). - * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: *

      {@code
          *      # Scalar indices
      -   *      output[:, ..., :] = params[indices, :, ... :]
      +   *      ref[indices, ...] -= updates[...]
          *
      -   *      # Vector indices
      -   *      output[i, :, ..., :] = params[indices[i], :, ... :]
      +   *      # Vector indices (for each i)
      +   *      ref[indices[i], ...] -= updates[i, ...]
          *
      -   *      # Higher rank indices
      -   *      output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
      +   *      # High rank indices (for each i, ..., j)
      +   *      ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
          *  }
      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + *

      + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their (negated) contributions add. + *

      + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + *

      + *

      + * + *
      * - * @param data type for {@code output()} output - * @param resource - * @param indices - * @param dtype + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to subtract from `ref`. * @param options carries optional attributes values - * @return a new instance of ResourceGather - */ - public ResourceGather resourceGather(Operand resource, - Operand indices, DataType dtype, ResourceGather.Options... options) { - return ResourceGather.create(scope, resource, indices, dtype, options); - } - - /** - * - * @param data type for {@code output()} output - * @param resource - * @param indices - * @param dtype - * @return a new instance of ResourceGatherNd + * @return a new instance of ScatterSub */ - public ResourceGatherNd resourceGatherNd( - Operand resource, Operand indices, DataType dtype) { - return ResourceGatherNd.create(scope, resource, indices, dtype); + public ScatterSub scatterSub(Operand ref, + Operand indices, Operand updates, ScatterSub.Options... options) { + return ScatterSub.create(scope, ref, indices, updates, options); } /** - * Adds sparse updates to the variable referenced by `resource`. + * Applies sparse updates to a variable reference. *

      * This operation computes - *

      + *

      {@code
          *      # Scalar indices
      -   *      ref[indices, ...] += updates[...]
      -   *  

      + * ref[indices, ...] = updates[...] + * * # Vector indices (for each i) - * ref[indices[i], ...] += updates[i, ...] - *

      + * ref[indices[i], ...] = updates[i, ...] + * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + * }

      + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions add. + * If values in `ref` is to be updated more than once, because there are + * duplicate entries in `indices`, the order at which the updates happen + * for each value is undefined. *

      * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *

      *

      - * + * *
      + *

      + * See also `tf.batch_scatter_update` and `tf.scatter_nd_update`. * - * @param resource Should be from a `Variable` node. + * @param data type for {@code outputRef()} output + * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterAdd + * @param updates A tensor of updated values to store in `ref`. + * @param options carries optional attributes values + * @return a new instance of ScatterUpdate */ - public ResourceScatterAdd resourceScatterAdd( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterAdd.create(scope, resource, indices, updates); + public ScatterUpdate scatterUpdate(Operand ref, + Operand indices, Operand updates, ScatterUpdate.Options... options) { + return ScatterUpdate.create(scope, ref, indices, updates, options); } /** - * Divides sparse updates into the variable referenced by `resource`. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] /= updates[...] - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] /= updates[i, ...] - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] - *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions multiply. - *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - *

      - *

      - * - *
      * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterDiv + * @param data type for {@code output()} output + * @param condition + * @param t + * @param e + * @return a new instance of Select */ - public ResourceScatterDiv resourceScatterDiv( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterDiv.create(scope, resource, indices, updates); + public Select select(Operand condition, Operand t, Operand e) { + return Select.create(scope, condition, t, e); } /** - * Reduces sparse updates into the variable referenced by `resource` using the `max` operation. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] = max(ref[indices, ...], updates[...]) - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + * Computes the difference between two lists of numbers or strings. *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions are combined. + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. In other words: *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` *

      - *

      - * - *
      + * For example, given this input: + *
      {@code
      +   *  x = [1, 2, 3, 4, 5, 6]
      +   *  y = [1, 3, 5]
      +   *  }
      + * This operation would return: + *
      {@code
      +   *  out ==> [2, 4, 6]
      +   *  idx ==> [1, 3, 5]
      +   *  }
      * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterMax + * @param data type for {@code out()} output + * @param data type for {@code idx()} output + * @param x 1-D. Values to keep. + * @param y 1-D. Values to remove. + * @return a new instance of SetDiff1d */ - public ResourceScatterMax resourceScatterMax( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterMax.create(scope, resource, indices, updates); + public SetDiff1d setDiff1d(Operand x, Operand y) { + return SetDiff1d.create(scope, x, y); } /** - * Reduces sparse updates into the variable referenced by `resource` using the `min` operation. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] = min(ref[indices, ...], updates[...]) - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + * Computes the difference between two lists of numbers or strings. *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions are combined. + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. In other words: *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` *

      - *

      - * - *
      + * For example, given this input: + *
      {@code
      +   *  x = [1, 2, 3, 4, 5, 6]
      +   *  y = [1, 3, 5]
      +   *  }
      + * This operation would return: + *
      {@code
      +   *  out ==> [2, 4, 6]
      +   *  idx ==> [1, 3, 5]
      +   *  }
      * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterMin + * @param data type for {@code out()} output + * @param data type for {@code idx()} output + * @param x 1-D. Values to keep. + * @param y 1-D. Values to remove. + * @param outIdx + * @return a new instance of SetDiff1d */ - public ResourceScatterMin resourceScatterMin( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterMin.create(scope, resource, indices, updates); + public SetDiff1d setDiff1d(Operand x, Operand y, + DataType outIdx) { + return SetDiff1d.create(scope, x, y, outIdx); } /** - * Multiplies sparse updates into the variable referenced by `resource`. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] *= updates[...] - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] *= updates[i, ...] - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] - *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions multiply. + * Number of unique elements along last dimension of input `set`. *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, + * and `set_shape`. The last dimension contains values in a set, duplicates are + * allowed but ignored. *

      - *

      - * - *
      + * If `validate_indices` is `True`, this op validates the order and range of `set` + * indices. * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterMul + * @param setIndices 2D `Tensor`, indices of a `SparseTensor`. + * @param setValues 1D `Tensor`, values of a `SparseTensor`. + * @param setShape 1D `Tensor`, shape of a `SparseTensor`. + * @param options carries optional attributes values + * @return a new instance of SetSize */ - public ResourceScatterMul resourceScatterMul( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterMul.create(scope, resource, indices, updates); + public SetSize setSize(Operand setIndices, Operand setValues, + Operand setShape, SetSize.Options... options) { + return SetSize.create(scope, setIndices, setValues, setShape, options); } /** - * Applies sparse addition to individual values or slices in a Variable. - *

      - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - *

      - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + * Returns the shape of a tensor. *

      - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. + * This operation returns a 1-D integer tensor representing the shape of `input`. *

      - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - *

      {@code
      -   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
      -   *  }
      - * For example, say we want to add 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that addition would look like this: + * For example: *
      {@code
      -   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
      -   *  indices = tf.constant([[4], [3], [1], [7]])
      -   *  updates = tf.constant([9, 10, 11, 12])
      -   *  add = tf.scatter_nd_add(ref, indices, updates)
      -   *  with tf.Session() as sess:
      -   *    print sess.run(add)
      +   *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
      +   *  shape(t) ==> [2, 2, 3]
          *  }
      - * The resulting update to ref would look like this: - *

      - * [1, 13, 3, 14, 14, 6, 7, 20] - *

      - * See `tf.scatter_nd` for more details about how to make updates to - * slices. * - * @param ref A resource handle. Must be from a VarHandleOp. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of - * values to add to ref. - * @param options carries optional attributes values - * @return a new instance of ResourceScatterNdAdd + * @param data type for {@code output()} output + * @param input + * @return a new instance of Shape */ - public ResourceScatterNdAdd resourceScatterNdAdd( - Operand ref, Operand indices, Operand updates, - ResourceScatterNdAdd.Options... options) { - return ResourceScatterNdAdd.create(scope, ref, indices, updates, options); + public org.tensorflow.op.core.Shape shape(Operand input) { + return org.tensorflow.op.core.Shape.create(scope, input); } /** - * Applies sparse subtraction to individual values or slices in a Variable. - *

      - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - *

      - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + * Returns the shape of a tensor. *

      - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. + * This operation returns a 1-D integer tensor representing the shape of `input`. *

      - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * For example: *

      {@code
      -   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
      +   *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
      +   *  shape(t) ==> [2, 2, 3]
          *  }
      - * For example, say we want to subtract 4 scattered elements from a rank-1 tensor - * with 8 elements. In Python, that subtraction would look like this: - *
      {@code
      -   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
      -   *  indices = tf.constant([[4], [3], [1], [7]])
      -   *  updates = tf.constant([9, 10, 11, 12])
      -   *  sub = tf.scatter_nd_sub(ref, indices, updates)
      -   *  with tf.Session() as sess:
      -   *    print sess.run(sub)
      -   *  }
      - * The resulting update to ref would look like this: - *

      - * [1, -9, 3, -6, -4, 6, 7, -4] - *

      - * See `tf.scatter_nd` for more details about how to make updates to - * slices. * - * @param ref A resource handle. Must be from a VarHandleOp. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of - * values to add to ref. - * @param options carries optional attributes values - * @return a new instance of ResourceScatterNdSub + * @param data type for {@code output()} output + * @param input + * @param outType + * @return a new instance of Shape */ - public ResourceScatterNdSub resourceScatterNdSub( - Operand ref, Operand indices, Operand updates, - ResourceScatterNdSub.Options... options) { - return ResourceScatterNdSub.create(scope, ref, indices, updates, options); + public org.tensorflow.op.core.Shape shape( + Operand input, DataType outType) { + return org.tensorflow.op.core.Shape.create(scope, input, outType); } /** - * Applies sparse `updates` to individual values or slices within a given - *

      - * variable according to `indices`. + * Returns shape of tensors. *

      - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * This operation returns N 1-D integer tensors representing shape of `input[i]s`. + * + * @param data type for {@code output()} output + * @param input + * @return a new instance of ShapeN + */ + public ShapeN shapeN(Iterable> input) { + return ShapeN.create(scope, input); + } + + /** + * Returns shape of tensors. *

      - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + * This operation returns N 1-D integer tensors representing shape of `input[i]s`. + * + * @param data type for {@code output()} output + * @param input + * @param outType + * @return a new instance of ShapeN + */ + public ShapeN shapeN(Iterable> input, + DataType outType) { + return ShapeN.create(scope, input, outType); + } + + /** + * Returns the size of a tensor. *

      - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. + * This operation returns an integer representing the number of elements in + * `input`. *

      - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - *

      {@code
      -   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
      -   *  }
      - * For example, say we want to update 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that update would look like this: + * For example: *
      {@code
      -   *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      -   *      indices = tf.constant([[4], [3], [1] ,[7]])
      -   *      updates = tf.constant([9, 10, 11, 12])
      -   *      update = tf.scatter_nd_update(ref, indices, updates)
      -   *      with tf.Session() as sess:
      -   *        print sess.run(update)
      +   *  # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]
      +   *  size(t) ==> 12
          *  }
      - * The resulting update to ref would look like this: - *

      - * [1, 11, 3, 10, 9, 6, 7, 12] - *

      - * See `tf.scatter_nd` for more details about how to make updates to - * slices. * - * @param ref A resource handle. Must be from a VarHandleOp. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated - * values to add to ref. - * @param options carries optional attributes values - * @return a new instance of ResourceScatterNdUpdate + * @param data type for {@code output()} output + * @param input + * @return a new instance of Size */ - public ResourceScatterNdUpdate resourceScatterNdUpdate( - Operand ref, Operand indices, Operand updates, - ResourceScatterNdUpdate.Options... options) { - return ResourceScatterNdUpdate.create(scope, ref, indices, updates, options); + public Size size(Operand input) { + return Size.create(scope, input); } /** - * Subtracts sparse updates from the variable referenced by `resource`. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] -= updates[...] - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] -= updates[i, ...] - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] - *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions add. + * Returns the size of a tensor. *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * This operation returns an integer representing the number of elements in + * `input`. *

      - *

      - * - *
      + * For example: + *
      {@code
      +   *  # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]
      +   *  size(t) ==> 12
      +   *  }
      * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterSub + * @param data type for {@code output()} output + * @param input + * @param outType + * @return a new instance of Size */ - public ResourceScatterSub resourceScatterSub( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterSub.create(scope, resource, indices, updates); + public Size size(Operand input, DataType outType) { + return Size.create(scope, input, outType); } /** - * Assigns sparse updates to the variable referenced by `resource`. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] = updates[...] - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] = updates[i, ...] - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + * Parses a text file and creates a batch of examples. * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @return a new instance of ResourceScatterUpdate + * @param filename The corpus's text file name. + * @param batchSize The size of produced batch. + * @param options carries optional attributes values + * @return a new instance of Skipgram */ - public ResourceScatterUpdate resourceScatterUpdate( - Operand resource, Operand indices, Operand updates) { - return ResourceScatterUpdate.create(scope, resource, indices, updates); + public Skipgram skipgram(String filename, Long batchSize, Skipgram.Options... options) { + return Skipgram.create(scope, filename, batchSize, options); } /** - * Assign `value` to the sliced l-value reference of `ref`. + * Return a slice from 'input'. *

      - * The values of `value` are assigned to the positions in the variable - * `ref` that are selected by the slice parameters. The slice parameters - * `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. + * The output tensor is a tensor with dimensions described by 'size' + * whose values are extracted from 'input' starting at the offsets in + * 'begin'. *

      - * NOTE this op currently does not support broadcasting and so `value`'s - * shape must be exactly the shape produced by the slice of `ref`. + * Requirements: + * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) * - * @param ref - * @param begin - * @param end - * @param strides - * @param value - * @param options carries optional attributes values - * @return a new instance of ResourceStridedSliceAssign + * @param data type for {@code output()} output + * @param input + * @param begin begin[i] specifies the offset into the 'i'th dimension of + * 'input' to slice from. + * @param size size[i] specifies the number of elements of the 'i'th dimension + * of 'input' to slice. If size[i] is -1, all remaining elements in dimension + * i are included in the slice (i.e. this is equivalent to setting + * size[i] = input.dim_size(i) - begin[i]). + * @return a new instance of Slice */ - public ResourceStridedSliceAssign resourceStridedSliceAssign( - Operand ref, Operand begin, Operand end, Operand strides, Operand value, - ResourceStridedSliceAssign.Options... options) { - return ResourceStridedSliceAssign.create(scope, ref, begin, end, strides, value, options); + public Slice slice(Operand input, Operand begin, + Operand size) { + return Slice.create(scope, input, begin, size); } /** - * Reverses specific dimensions of a tensor. + * Returns a copy of the input tensor. + * + * @param data type for {@code output()} output + * @param input + * @return a new instance of Snapshot + */ + public Snapshot snapshot(Operand input) { + return Snapshot.create(scope, input); + } + + /** + * SpaceToBatch for N-D tensors of type T. *

      - * NOTE `tf.reverse` has now changed behavior in preparation for 1.0. - * `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. + * This operation divides "spatial" dimensions `[1, ..., M]` of the input into a + * grid of blocks of shape `block_shape`, and interleaves these blocks with the + * "batch" dimension (0) such that in the output, the spatial dimensions + * `[1, ..., M]` correspond to the position within the grid, and the batch + * dimension combines both the position within a spatial block and the original + * batch position. Prior to division into blocks, the spatial dimensions of the + * input are optionally zero padded according to `paddings`. See below for a + * precise description. + * + * @param data type for {@code output()} output + * @param input N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, + * where spatial_shape has `M` dimensions. + * @param blockShape 1-D with shape `[M]`, all values must be >= 1. + * @param paddings 2-D with shape `[M, 2]`, all values must be >= 0. + * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension + * `i + 1`, which corresponds to spatial dimension `i`. It is required that + * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. *

      - * Given a `tensor`, and a `int32` tensor `axis` representing the set of - * dimensions of `tensor` to reverse. This operation reverses each dimension - * `i` for which there exists `j` s.t. `axis[j] == i`. + * This operation is equivalent to the following steps: *

      - * `tensor` can have up to 8 dimensions. The number of dimensions specified - * in `axis` may be 0 or more entries. If an index is specified more than - * once, a InvalidArgument error is raised. + * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the + * input according to `paddings` to produce `padded` of shape `padded_shape`. *

      - * For example: - *

      {@code
      -   *  # tensor 't' is [[[[ 0,  1,  2,  3],
      -   *  #                  [ 4,  5,  6,  7],
      -   *  #                  [ 8,  9, 10, 11]],
      -   *  #                 [[12, 13, 14, 15],
      -   *  #                  [16, 17, 18, 19],
      -   *  #                  [20, 21, 22, 23]]]]
      -   *  # tensor 't' shape is [1, 2, 3, 4]
      -   *
      -   *  # 'dims' is [3] or 'dims' is [-1]
      -   *  reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
      -   *                          [ 7,  6,  5,  4],
      -   *                          [ 11, 10, 9, 8]],
      -   *                         [[15, 14, 13, 12],
      -   *                          [19, 18, 17, 16],
      -   *                          [23, 22, 21, 20]]]]
      -   *
      -   *  # 'dims' is '[1]' (or 'dims' is '[-3]')
      -   *  reverse(t, dims) ==> [[[[12, 13, 14, 15],
      -   *                          [16, 17, 18, 19],
      -   *                          [20, 21, 22, 23]
      -   *                         [[ 0,  1,  2,  3],
      -   *                          [ 4,  5,  6,  7],
      -   *                          [ 8,  9, 10, 11]]]]
      -   *
      -   *  # 'dims' is '[2]' (or 'dims' is '[-2]')
      -   *  reverse(t, dims) ==> [[[[8, 9, 10, 11],
      -   *                          [4, 5, 6, 7],
      -   *                          [0, 1, 2, 3]]
      -   *                         [[20, 21, 22, 23],
      -   *                          [16, 17, 18, 19],
      -   *                          [12, 13, 14, 15]]]]
      +   *  2. Reshape `padded` to `reshaped_padded` of shape:
      +   *  

      + * [batch] + + * [padded_shape[1] / block_shape[0], + * block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1], + * block_shape[M-1]] + + * remaining_shape + *

      + * 3. Permute dimensions of `reshaped_padded` to produce + * `permuted_reshaped_padded` of shape: + *

      + * block_shape + + * [batch] + + * [padded_shape[1] / block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1]] + + * remaining_shape + *

      + * 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch + * dimension, producing an output tensor of shape: + *

      + * [batch * prod(block_shape)] + + * [padded_shape[1] / block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1]] + + * remaining_shape + *

      + * Some examples: + *

      + * (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and + * `paddings = [[0, 0], [0, 0]]`: + *

      {@code
      +   *  x = [[[[1], [2]], [[3], [4]]]]
      +   *  }
      + * The output tensor has shape `[4, 1, 1, 1]` and value: + *
      {@code
      +   *  [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
          *  }
      + * (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and + * `paddings = [[0, 0], [0, 0]]`: + *
      {@code
      +   *  x = [[[[1, 2, 3], [4, 5, 6]],
      +   *        [[7, 8, 9], [10, 11, 12]]]]
      +   *  }
      + * The output tensor has shape `[4, 1, 1, 3]` and value: + *
      {@code
      +   *  [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
      +   *  }
      + * (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and + * `paddings = [[0, 0], [0, 0]]`: + *
      {@code
      +   *  x = [[[[1],   [2],  [3],  [4]],
      +   *        [[5],   [6],  [7],  [8]],
      +   *        [[9],  [10], [11],  [12]],
      +   *        [[13], [14], [15],  [16]]]]
      +   *  }
      + * The output tensor has shape `[4, 2, 2, 1]` and value: + *
      {@code
      +   *  x = [[[[1], [3]], [[9], [11]]],
      +   *       [[[2], [4]], [[10], [12]]],
      +   *       [[[5], [7]], [[13], [15]]],
      +   *       [[[6], [8]], [[14], [16]]]]
      +   *  }
      + * (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and + * paddings = `[[0, 0], [2, 0]]`: + *
      {@code
      +   *  x = [[[[1],   [2],  [3],  [4]],
      +   *        [[5],   [6],  [7],  [8]]],
      +   *       [[[9],  [10], [11],  [12]],
      +   *        [[13], [14], [15],  [16]]]]
      +   *  }
      + * The output tensor has shape `[8, 1, 3, 1]` and value: + *
      {@code
      +   *  x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
      +   *       [[[0], [2], [4]]], [[[0], [10], [12]]],
      +   *       [[[0], [5], [7]]], [[[0], [13], [15]]],
      +   *       [[[0], [6], [8]]], [[[0], [14], [16]]]]
      +   *  }
      + * Among others, this operation is useful for reducing atrous convolution into + * regular convolution. + * @return a new instance of SpaceToBatchNd + */ + public SpaceToBatchNd spaceToBatchNd( + Operand input, Operand blockShape, Operand paddings) { + return SpaceToBatchNd.create(scope, input, blockShape, paddings); + } + + /** + * Splits a tensor into `num_split` tensors along one dimension. * * @param data type for {@code output()} output - * @param tensor Up to 8-D. - * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range - * `[-rank(tensor), rank(tensor))`. - * @return a new instance of Reverse + * @param axis 0-D. The dimension along which to split. Must be in the range + * `[-rank(value), rank(value))`. + * @param value The tensor to split. + * @param numSplit The number of ways to split. Must evenly divide + * `value.shape[split_dim]`. + * @return a new instance of Split */ - public Reverse reverse(Operand tensor, - Operand axis) { - return Reverse.create(scope, tensor, axis); + public Split split(Operand axis, Operand value, Long numSplit) { + return Split.create(scope, axis, value, numSplit); } /** - * Reverses variable length slices. - *

      - * This op first slices `input` along the dimension `batch_dim`, and for each - * slice `i`, reverses the first `seq_lengths[i]` elements along - * the dimension `seq_dim`. - *

      - * The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, - * and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. + * Splits a tensor into `num_split` tensors along one dimension. + * + * @param data type for {@code output()} output + * @param value The tensor to split. + * @param sizeSplits list containing the sizes of each output tensor along the split + * dimension. Must sum to the dimension of value along split_dim. + * Can contain one -1 indicating that dimension is to be inferred. + * @param axis 0-D. The dimension along which to split. Must be in the range + * `[-rank(value), rank(value))`. + * @param numSplit + * @return a new instance of SplitV + */ + public SplitV splitV(Operand value, + Operand sizeSplits, Operand axis, Long numSplit) { + return SplitV.create(scope, value, sizeSplits, axis, numSplit); + } + + /** + * Removes dimensions of size 1 from the shape of a tensor. *

      - * The output slice `i` along dimension `batch_dim` is then given by input - * slice `i`, with the first `seq_lengths[i]` slices along dimension - * `seq_dim` reversed. + * Given a tensor `input`, this operation returns a tensor of the same type with + * all dimensions of size 1 removed. If you don't want to remove all size 1 + * dimensions, you can remove specific size 1 dimensions by specifying + * `axis`. *

      * For example: *

      {@code
      -   *  # Given this:
      -   *  batch_dim = 0
      -   *  seq_dim = 1
      -   *  input.dims = (4, 8, ...)
      -   *  seq_lengths = [7, 2, 3, 5]
      -   *
      -   *  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
      -   *  output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
      -   *  output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
      -   *  output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
      -   *  output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
      -   *
      -   *  # while entries past seq_lens are copied through:
      -   *  output[0, 7:, :, ...] = input[0, 7:, :, ...]
      -   *  output[1, 2:, :, ...] = input[1, 2:, :, ...]
      -   *  output[2, 3:, :, ...] = input[2, 3:, :, ...]
      -   *  output[3, 2:, :, ...] = input[3, 2:, :, ...]
      +   *  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
      +   *  shape(squeeze(t)) ==> [2, 3]
          *  }
      - * In contrast, if: + * Or, to remove specific size 1 dimensions: *
      {@code
      -   *  # Given this:
      -   *  batch_dim = 2
      -   *  seq_dim = 0
      -   *  input.dims = (8, ?, 4, ...)
      -   *  seq_lengths = [7, 2, 3, 5]
      -   *
      -   *  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
      -   *  output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
      -   *  output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
      -   *  output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
      -   *  output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
      -   *
      -   *  # while entries past seq_lens are copied through:
      -   *  output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
      -   *  output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
      -   *  output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
      -   *  output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
      +   *  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
      +   *  shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
          *  }
      * * @param data type for {@code output()} output - * @param input The input to reverse. - * @param seqLengths 1-D with length `input.dims(batch_dim)` and - * `max(seq_lengths) <= input.dims(seq_dim)` - * @param seqDim The dimension which is partially reversed. + * @param input The `input` to squeeze. * @param options carries optional attributes values - * @return a new instance of ReverseSequence + * @return a new instance of Squeeze */ - public ReverseSequence reverseSequence(Operand input, - Operand seqLengths, Long seqDim, ReverseSequence.Options... options) { - return ReverseSequence.create(scope, input, seqLengths, seqDim, options); + public Squeeze squeeze(Operand input, Squeeze.Options... options) { + return Squeeze.create(scope, input, options); } /** - * Rolls the elements of a tensor along an axis. + * Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. *

      - * The elements are shifted positively (towards larger indices) by the offset of - * `shift` along the dimension of `axis`. Negative `shift` values will shift - * elements in the opposite direction. Elements that roll passed the last position - * will wrap around to the first and vice versa. Multiple shifts along multiple - * axes may be specified. + * Packs the `N` tensors in `values` into a tensor with rank one higher than each + * tensor in `values`, by packing them along the `axis` dimension. + * Given a list of tensors of shape `(A, B, C)`; + *

      + * if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. + * if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. + * Etc. *

      * For example: *

      {@code
      -   *  # 't' is [0, 1, 2, 3, 4]
      -   *  roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
      -   *
      -   *  # shifting along multiple dimensions
      -   *  # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
      -   *  roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
      -   *
      -   *  # shifting along the same axis multiple times
      -   *  # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
      -   *  roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
      +   *  # 'x' is [1, 4]
      +   *  # 'y' is [2, 5]
      +   *  # 'z' is [3, 6]
      +   *  pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
      +   *  pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
          *  }
      + * This is the opposite of `unpack`. * * @param data type for {@code output()} output - * @param input - * @param shift Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which - * elements are shifted positively (towards larger indices) along the dimension - * specified by `axis[i]`. Negative shifts will roll the elements in the opposite - * direction. - * @param axis Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the shift - * `shift[i]` should occur. If the same axis is referenced more than once, the - * total shift for that axis will be the sum of all the shifts that belong to that - * axis. - * @return a new instance of Roll + * @param values Must be of same shape and type. + * @param options carries optional attributes values + * @return a new instance of Stack */ - public Roll roll(Operand input, - Operand shift, Operand axis) { - return Roll.create(scope, input, shift, axis); + public Stack stack(Iterable> values, Stack.Options... options) { + return Stack.create(scope, values, options); } /** - * Perform batches of RPC requests. - *

      - * This op asynchronously performs either a single RPC request, or a batch - * of requests. RPC requests are defined by three main parameters: - *

      - * - `address` (the host+port or BNS address of the request) - * - `method` (the RPC method name for the request) - * - `request` (the serialized proto string, or vector of strings, - * of the RPC request argument). - *

      - * For example, if you have an RPC service running on port localhost:2345, - * and its interface is configured with the following proto declaration: - *

      {@code
      -   *  service MyService {
      -   *    rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
      -   *    }
      -   *  };
      -   *  }
      - * then call this op with arguments: - *
      {@code
      -   *  address = "localhost:2345"
      -   *  method = "MyService/MyMethod"
      -   *  }
      - * The `request` tensor is a string tensor representing serialized `MyRequestProto` - * strings; and the output string tensor `response` will have the same shape - * and contain (upon successful completion) corresponding serialized - * `MyResponseProto` strings. - *

      - * For example, to send a single, empty, `MyRequestProto`, call - * this op with `request = ""`. To send 5 parallel empty requests, - * call this op with `request = ["", "", "", "", ""]`. - *

      - * More generally, one can create a batch of `MyRequestProto` serialized protos - * from regular batched tensors using the `encode_proto` op, and convert - * the response `MyResponseProto` serialized protos to batched tensors - * using the `decode_proto` op. - *

      - * NOTE Working with serialized proto strings is faster than instantiating - * actual proto objects in memory, so no performance degradation is expected - * compared to writing custom kernels for this workflow. - *

      - * If the connection fails or the remote worker returns an error - * status, the op reraises this exception locally. + * Stage values similar to a lightweight Enqueue. *

      - * See the `TryRpc` op if you prefer to handle RPC failures manually in the graph. + * The basic functionality of this Op is similar to a queue with many + * fewer capabilities and options. This Op is optimized for performance. * - * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `method` and `request`. - * @param method `0-D` or `1-D`. The method address on the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `request`. - * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `method`. + * @param values a list of tensors + * dtypes A list of data types that inserted values should adhere to. * @param options carries optional attributes values - * @return a new instance of Rpc + * @return a new instance of Stage */ - public Rpc rpc(Operand address, Operand method, Operand request, - Rpc.Options... options) { - return Rpc.create(scope, address, method, request, options); + public Stage stage(Iterable> values, Stage.Options... options) { + return Stage.create(scope, values, options); } /** - * Adds sparse updates to a variable reference. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] += updates[...] - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] += updates[i, ...] - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] - *

      - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions add. - *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - *

      - *

      - * - *
      + * Op removes all elements in the underlying container. * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. + * @param dtypes * @param options carries optional attributes values - * @return a new instance of ScatterAdd + * @return a new instance of StageClear */ - public ScatterAdd scatterAdd(Operand ref, - Operand indices, Operand updates, ScatterAdd.Options... options) { - return ScatterAdd.create(scope, ref, indices, updates, options); + public StageClear stageClear(List> dtypes, StageClear.Options... options) { + return StageClear.create(scope, dtypes, options); } /** - * Divides a variable reference by sparse updates. - *

      - * This operation computes - *

      {@code
      -   *      # Scalar indices
      -   *      ref[indices, ...] /= updates[...]
      -   *
      -   *      # Vector indices (for each i)
      -   *      ref[indices[i], ...] /= updates[i, ...]
      -   *
      -   *      # High rank indices (for each i, ..., j)
      -   *      ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
      -   *  }
      - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions divide. + * Op peeks at the values at the specified index. If the *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * underlying container does not contain sufficient elements + * this op will block until it does. This Op is optimized for + * performance. * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of values that `ref` is divided by. + * @param index + * @param dtypes * @param options carries optional attributes values - * @return a new instance of ScatterDiv + * @return a new instance of StagePeek */ - public ScatterDiv scatterDiv(Operand ref, - Operand indices, Operand updates, ScatterDiv.Options... options) { - return ScatterDiv.create(scope, ref, indices, updates, options); + public StagePeek stagePeek(Operand index, List> dtypes, + StagePeek.Options... options) { + return StagePeek.create(scope, index, dtypes, options); } /** - * Reduces sparse updates into a variable reference using the `max` operation. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] = max(ref[indices, ...], updates[...]) - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) - *

      - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions combine. - *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - *

      - *

      - * - *
      + * Op returns the number of elements in the underlying container. * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to reduce into `ref`. + * @param dtypes * @param options carries optional attributes values - * @return a new instance of ScatterMax + * @return a new instance of StageSize */ - public ScatterMax scatterMax(Operand ref, - Operand indices, Operand updates, ScatterMax.Options... options) { - return ScatterMax.create(scope, ref, indices, updates, options); + public StageSize stageSize(List> dtypes, StageSize.Options... options) { + return StageSize.create(scope, dtypes, options); } /** - * Reduces sparse updates into a variable reference using the `min` operation. - *

      - * This operation computes - *

      - * # Scalar indices - * ref[indices, ...] = min(ref[indices, ...], updates[...]) - *

      - * # Vector indices (for each i) - * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) - *

      - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) - *

      - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. + * Stops gradient computation. *

      - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions combine. + * When executed in a graph, this op outputs its input tensor as-is. *

      - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * When building ops to compute gradients, this op prevents the contribution of + * its inputs to be taken into account. Normally, the gradient generator adds ops + * to a graph to compute the derivatives of a specified 'loss' by recursively + * finding out inputs that contributed to its computation. If you insert this op + * in the graph it inputs are masked from the gradient generator. They are not + * taken into account for computing gradients. *

      - *

      - * - *
      + * This is useful any time you want to compute a value with TensorFlow but need + * to pretend that the value was a constant. Some examples include: + *
        + *
      • + * The EM algorithm where the M-step should not involve backpropagation + * through the output of the E-step. + *
      • + *
      • + * Contrastive divergence training of Boltzmann machines where, when + * differentiating the energy function, the training must not backpropagate + * through the graph that generated the samples from the model. + *
      • + *
      • + * Adversarial training, where no backprop should happen through the adversarial + * example generation process. * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to reduce into `ref`. - * @param options carries optional attributes values - * @return a new instance of ScatterMin + * @param data type for {@code output()} output + * @param input + * @return a new instance of StopGradient */ - public ScatterMin scatterMin(Operand ref, - Operand indices, Operand updates, ScatterMin.Options... options) { - return ScatterMin.create(scope, ref, indices, updates, options); + public StopGradient stopGradient(Operand input) { + return StopGradient.create(scope, input); } /** - * Multiplies sparse updates into a variable reference. + * Return a strided slice from `input`. *

        - * This operation computes - *

        {@code
        -   *      # Scalar indices
        -   *      ref[indices, ...] *= updates[...]
        -   *
        -   *      # Vector indices (for each i)
        -   *      ref[indices[i], ...] *= updates[i, ...]
        -   *
        -   *      # High rank indices (for each i, ..., j)
        -   *      ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
        -   *  }
        - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

        - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their contributions multiply. - *

        - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to multiply to `ref`. - * @param options carries optional attributes values - * @return a new instance of ScatterMul - */ - public ScatterMul scatterMul(Operand ref, - Operand indices, Operand updates, ScatterMul.Options... options) { - return ScatterMul.create(scope, ref, indices, updates, options); - } - - /** - * Scatter `updates` into a new tensor according to `indices`. - *

        - * Creates a new tensor by applying sparse `updates` to individual values or - * slices within a tensor (initially zero for numeric, empty for string) of - * the given `shape` according to indices. This operator is the inverse of the - * `tf.gather_nd` operator which extracts values or slices from a given tensor. - *

        - * This operation is similar to tensor_scatter_add, except that the tensor is - * zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical - * to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` - *

        - * If `indices` contains duplicates, then their updates are accumulated (summed). - *

        - * WARNING: The order in which updates are applied is nondeterministic, so the - * output will be nondeterministic if `indices` contains duplicates -- because - * of some numerical approximation issues, numbers summed in different order - * may yield different results. - *

        - * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + * Note, most python users will want to use the Python `Tensor.__getitem__` + * or `Variable.__getitem__` rather than this op directly. *

        - * indices.shape[-1] <= shape.rank + * The goal of this op is to produce a new tensor with a subset of + * the elements from the `n` dimensional `input` tensor. The subset is chosen using + * a sequence of `m` sparse range specifications encoded into the arguments + * of this function. Note, in some cases + * `m` could be equal to `n`, but this need not be the case. Each + * range specification entry can be one of the following: *

        - * The last dimension of `indices` corresponds to indices into elements - * (if `indices.shape[-1] = shape.rank`) or slices - * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - * `shape`. `updates` is a tensor with shape + * - An ellipsis (...). Ellipses are used to imply zero or more + * dimensions of full-dimension selection and are produced using + * `ellipsis_mask`. For example, `foo[...]` is the identity slice. *

        - * indices.shape[:-1] + shape[indices.shape[-1]:] + * - A new axis. This is used to insert a new shape=1 dimension and is + * produced using `new_axis_mask`. For example, `foo[:, ...]` where + * `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. *

        - * The simplest form of scatter is to insert individual elements in a tensor by - * index. For example, say we want to insert 4 scattered elements in a rank-1 - * tensor with 8 elements. + * - A range `begin:end:stride`. This is used to specify how much to choose from + * a given dimension. `stride` can be any integer but 0. `begin` is an integer + * which represents the index of the first value to select while `end` represents + * the index of the last value to select. The number of values selected in each + * dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. + * `begin` and `end` can be negative where `-1` is the last element, `-2` is + * the second to last. `begin_mask` controls whether to replace the explicitly + * given `begin` with an implicit effective value of `0` if `stride > 0` and + * `-1` if `stride < 0`. `end_mask` is analogous but produces the number + * required to create the largest open interval. For example, given a shape + * `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do + * not assume this is equivalent to `foo[0:-1]` which has an effective `begin` + * and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the + * first dimension of a tensor while dropping the last two (in the original + * order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. *

        - *

        - * - *
        + * - A single index. This is used to keep only elements that have a given + * index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a + * shape `(6,)` tensor. This is encoded in `begin` and `end` and + * `shrink_axis_mask`. *

        - * In Python, this scatter operation would look like this: + * Each conceptual range specification is encoded in the op's argument. This + * encoding is best understand by considering a non-trivial example. In + * particular, + * `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as *

        {@code
        -   *      indices = tf.constant([[4], [3], [1], [7]])
        -   *      updates = tf.constant([9, 10, 11, 12])
        -   *      shape = tf.constant([8])
        -   *      scatter = tf.scatter_nd(indices, updates, shape)
        -   *      print(scatter)
        +   *  begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
        +   *  end = [2, 4, x, x, -3, x]
        +   *  strides = [1, 1, x, x, -1, 1]
        +   *  begin_mask = 1<<4 | 1 << 5 = 48
        +   *  end_mask = 1<<5 = 32
        +   *  ellipsis_mask = 1<<3 = 8
        +   *  new_axis_mask = 1<<2 4
        +   *  shrink_axis_mask = 1<<0
            *  }
        - * The resulting tensor would look like this: + * In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of + * the slice becomes (2, 1, 5, 5, 2, 5). + * Let us walk step by step through each argument specification. *

        - * [0, 11, 0, 10, 9, 0, 0, 12] + * 1. The first argument in the example slice is turned into `begin = 1` and + * `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we + * also set the appropriate bit in `shrink_axis_mask`. *

        - * We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. + * 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have + * zero bits contributed. *

        - *

        - * - *
        + * 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 + * dimension in the final shape. Dummy values are contributed to begin, + * end and stride, while the new_axis_mask bit is set. *

        - * In Python, this scatter operation would look like this: - *

        {@code
        -   *      indices = tf.constant([[0], [2]])
        -   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
        -   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
        -   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
        -   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
        -   *      shape = tf.constant([4, 4, 4])
        -   *      scatter = tf.scatter_nd(indices, updates, shape)
        -   *      print(scatter)
        -   *  }
        - * The resulting tensor would look like this: + * 4. `...` grab the full ranges from as many dimensions as needed to + * fully specify a slice for every dimension of the input shape. *

        - * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], - * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] + * 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated + * with a dimension that has shape `s` is converted to a positive index + * `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion + * is done internally so begin, end and strides receive x, -3, and -1. + * The appropriate begin_mask bit is set to indicate the start range is the + * full range (ignoring the x). *

        - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. + * 6. `:` indicates that the entire contents of the corresponding dimension + * is selected. This is equivalent to `::` or `0::1`. begin, end, and strides + * receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and + * `end_mask` are also set. + *

        + * Requirements: + * `0 != strides[i] for i in [0, m)` + * `ellipsis_mask must be a power of two (only one ellipsis)` * - * @param data type for {@code output()} output - * @param indices Index tensor. - * @param updates Updates to scatter into output. - * @param shape 1-D. The shape of the resulting tensor. - * @return a new instance of ScatterNd + * @param data type for {@code output()} output + * @param input + * @param begin `begin[k]` specifies the offset into the `k`th range specification. + * The exact dimension this corresponds to will be determined by context. + * Out-of-bounds values will be silently clamped. If the `k`th bit of + * `begin_mask` then `begin[k]` is ignored and the full range of the + * appropriate dimension is used instead. Negative values causes indexing + * to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`. + * @param end `end[i]` is like `begin` with the exception that `end_mask` is + * used to determine full ranges. + * @param strides `strides[i]` specifies the increment in the `i`th specification + * after extracting a given element. Negative indices will reverse + * the original order. Out or range values are + * clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0` + * @param options carries optional attributes values + * @return a new instance of StridedSlice */ - public ScatterNd scatterNd(Operand indices, - Operand updates, Operand shape) { - return ScatterNd.create(scope, indices, updates, shape); + public StridedSlice stridedSlice(Operand input, + Operand begin, Operand end, Operand strides, StridedSlice.Options... options) { + return StridedSlice.create(scope, input, begin, end, strides, options); } /** - * Applies sparse addition to individual values or slices in a Variable. - *

        - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - *

        - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - *

        - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. - *

        - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - *

        {@code
        -   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
        -   *  }
        - * For example, say we want to add 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that addition would look like this: - *
        {@code
        -   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        -   *  indices = tf.constant([[4], [3], [1], [7]])
        -   *  updates = tf.constant([9, 10, 11, 12])
        -   *  add = tf.scatter_nd_add(ref, indices, updates)
        -   *  with tf.Session() as sess:
        -   *    print sess.run(add)
        -   *  }
        - * The resulting update to ref would look like this: + * Assign `value` to the sliced l-value reference of `ref`. *

        - * [1, 13, 3, 14, 14, 6, 7, 20] + * The values of `value` are assigned to the positions in the variable + * `ref` that are selected by the slice parameters. The slice parameters + * `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. *

        - * See `tf.scatter_nd` for more details about how to make updates to - * slices. + * NOTE this op currently does not support broadcasting and so `value`'s + * shape must be exactly the shape produced by the slice of `ref`. * * @param data type for {@code outputRef()} output - * @param ref A mutable Tensor. Should be from a Variable node. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated values - * to add to ref. + * @param ref + * @param begin + * @param end + * @param strides + * @param value * @param options carries optional attributes values - * @return a new instance of ScatterNdAdd + * @return a new instance of StridedSliceAssign */ - public ScatterNdAdd scatterNdAdd(Operand ref, - Operand indices, Operand updates, ScatterNdAdd.Options... options) { - return ScatterNdAdd.create(scope, ref, indices, updates, options); + public StridedSliceAssign stridedSliceAssign( + Operand ref, Operand begin, Operand end, Operand strides, Operand value, + StridedSliceAssign.Options... options) { + return StridedSliceAssign.create(scope, ref, begin, end, strides, value, options); } /** - * Applies sparse addition to `input` using individual values or slices - *

        - * from `updates` according to indices `indices`. The updates are non-aliasing: - * `input` is only modified in-place if no other operations will use it. - * Otherwise, a copy of `input` is made. This operation has a gradient with - * respect to both `input` and `updates`. - *

        - * `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - *

        - * `indices` must be integer tensor, containing indices into `input`. - * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. + * Returns the gradient of `StridedSlice`. *

        - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or `(P-K)`-dimensional slices - * (if `K < P`) along the `K`th dimension of `input`. + * Since `StridedSlice` cuts out pieces of its `input` which is size + * `shape`, its gradient will have the same shape (which is passed here + * as `shape`). The gradient will be zero in any element that the slice + * does not select. *

        - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - *

        - * $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ - *

        - * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 - * elements. In Python, that addition would look like this: - *

        - * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) - * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) - * with tf.Session() as sess: - * print(sess.run(output)) - *

        - * The resulting value `output` would look like this: - *

        - * [1, 13, 3, 14, 14, 6, 7, 20] - *

        - * See `tf.scatter_nd` for more details about how to make updates to slices. + * Arguments are the same as StridedSliceGrad with the exception that + * `dy` is the input gradient to be propagated and `shape` is the + * shape of `StridedSlice`'s `input`. * - * @param data type for {@code output()} output - * @param input A Tensor. - * @param indices A Tensor. Must be one of the following types: `int32`, `int64`. - * A tensor of indices into `input`. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated values - * to add to `input`. - * @return a new instance of ScatterNdNonAliasingAdd + * @param data type for {@code output()} output + * @param shape + * @param begin + * @param end + * @param strides + * @param dy + * @param options carries optional attributes values + * @return a new instance of StridedSliceGrad */ - public ScatterNdNonAliasingAdd scatterNdNonAliasingAdd( - Operand input, Operand indices, Operand updates) { - return ScatterNdNonAliasingAdd.create(scope, input, indices, updates); + public StridedSliceGrad stridedSliceGrad(Operand shape, + Operand begin, Operand end, Operand strides, Operand dy, + StridedSliceGrad.Options... options) { + return StridedSliceGrad.create(scope, shape, begin, end, strides, dy, options); } /** - * Applies sparse subtraction to individual values or slices in a Variable. - *

        - * within a given variable according to `indices`. - *

        - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - *

        - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - *

        - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. - *

        - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - *

        {@code
        -   *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
        -   *  }
        - * For example, say we want to subtract 4 scattered elements from a rank-1 tensor - * with 8 elements. In Python, that subtraction would look like this: - *
        {@code
        -   *  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        -   *  indices = tf.constant([[4], [3], [1], [7]])
        -   *  updates = tf.constant([9, 10, 11, 12])
        -   *  sub = tf.scatter_nd_sub(ref, indices, updates)
        -   *  with tf.Session() as sess:
        -   *    print sess.run(sub)
        -   *  }
        - * The resulting update to ref would look like this: - *

        - * [1, -9, 3, -6, -4, 6, 7, -4] + * Computes the sum of elements across dimensions of a tensor. *

        - * See `tf.scatter_nd` for more details about how to make updates to - * slices. + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. * - * @param data type for {@code outputRef()} output - * @param ref A mutable Tensor. Should be from a Variable node. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated values - * to subtract from ref. + * @param data type for {@code output()} output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. * @param options carries optional attributes values - * @return a new instance of ScatterNdSub + * @return a new instance of Sum */ - public ScatterNdSub scatterNdSub(Operand ref, - Operand indices, Operand updates, ScatterNdSub.Options... options) { - return ScatterNdSub.create(scope, ref, indices, updates, options); + public Sum sum(Operand input, Operand axis, + Sum.Options... options) { + return Sum.create(scope, input, axis, options); } /** - * Applies sparse `updates` to individual values or slices within a given - *

        - * variable according to `indices`. - *

        - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - *

        - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. - *

        - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. - *

        - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - *

        - * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ - *

        - * For example, say we want to update 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that update would look like this: - *

        {@code
        -   *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        -   *      indices = tf.constant([[4], [3], [1] ,[7]])
        -   *      updates = tf.constant([9, 10, 11, 12])
        -   *      update = tf.scatter_nd_update(ref, indices, updates)
        -   *      with tf.Session() as sess:
        -   *        print sess.run(update)
        -   *  }
        - * The resulting update to ref would look like this: - *

        - * [1, 11, 3, 10, 9, 6, 7, 12] + * Forwards `data` to the output port determined by `pred`. *

        - * See `tf.scatter_nd` for more details about how to make updates to - * slices. + * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, + * the data goes to `output_false`. *

        - * See also `tf.scatter_update` and `tf.batch_scatter_update`. + * See also `RefSwitch` and `Merge`. * - * @param data type for {@code outputRef()} output - * @param ref A mutable Tensor. Should be from a Variable node. - * @param indices A Tensor. Must be one of the following types: int32, int64. - * A tensor of indices into ref. - * @param updates A Tensor. Must have the same type as ref. A tensor of updated - * values to add to ref. - * @param options carries optional attributes values - * @return a new instance of ScatterNdUpdate + * @param data type for {@code outputFalse()} output + * @param data The tensor to be forwarded to the appropriate output. + * @param pred A scalar that specifies which output port will receive data. + * @return a new instance of SwitchCond */ - public ScatterNdUpdate scatterNdUpdate(Operand ref, - Operand indices, Operand updates, ScatterNdUpdate.Options... options) { - return ScatterNdUpdate.create(scope, ref, indices, updates, options); + public SwitchCond switchCond(Operand data, Operand pred) { + return SwitchCond.create(scope, data, pred); } /** - * Subtracts sparse updates to a variable reference. + * Returns a tensor that may be mutated, but only persists within a single step. *

        - *

        {@code
        -   *      # Scalar indices
        -   *      ref[indices, ...] -= updates[...]
        -   *
        -   *      # Vector indices (for each i)
        -   *      ref[indices[i], ...] -= updates[i, ...]
        -   *
        -   *      # High rank indices (for each i, ..., j)
        -   *      ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
        -   *  }
        - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. + * This is an experimental op for internal use only and it is possible to use this + * op in unsafe ways. DO NOT USE unless you fully understand the risks. *

        - * Duplicate entries are handled correctly: if multiple `indices` reference - * the same location, their (negated) contributions add. + * It is the caller's responsibility to ensure that 'ref' is eventually passed to a + * matching 'DestroyTemporaryVariable' op after all other uses have completed. *

        - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * Outputs a ref to the tensor state so it may be read or modified. *

        - *

        - * - *
        + * E.g. + * var = state_ops._temporary_variable([1, 2], types.float_) + * var_name = var.op.name + * var = state_ops.assign(var, [[4.0, 5.0]]) + * var = state_ops.assign_add(var, [[6.0, 7.0]]) + * final = state_ops._destroy_temporary_variable(var, var_name=var_name) * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to subtract from `ref`. + * @param data type for {@code ref()} output + * @param shape The shape of the variable tensor. + * @param dtype The type of elements in the variable tensor. * @param options carries optional attributes values - * @return a new instance of ScatterSub + * @return a new instance of TemporaryVariable */ - public ScatterSub scatterSub(Operand ref, - Operand indices, Operand updates, ScatterSub.Options... options) { - return ScatterSub.create(scope, ref, indices, updates, options); + public TemporaryVariable temporaryVariable(Shape shape, DataType dtype, + TemporaryVariable.Options... options) { + return TemporaryVariable.create(scope, shape, dtype, options); } /** - * Applies sparse updates to a variable reference. - *

        - * This operation computes - *

        {@code
        -   *      # Scalar indices
        -   *      ref[indices, ...] = updates[...]
        -   *
        -   *      # Vector indices (for each i)
        -   *      ref[indices[i], ...] = updates[i, ...]
        -   *
        -   *      # High rank indices (for each i, ..., j)
        -   *      ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
        -   *  }
        - * This operation outputs `ref` after the update is done. - * This makes it easier to chain operations that need to use the reset value. - *

        - * If values in `ref` is to be updated more than once, because there are - * duplicate entries in `indices`, the order at which the updates happen - * for each value is undefined. - *

        - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - *

        - *

        - * - *
        + * An array of Tensors of given size. *

        - * See also `tf.batch_scatter_update` and `tf.scatter_nd_update`. + * Write data via Write and read via Read or Pack. * - * @param data type for {@code outputRef()} output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to store in `ref`. + * @param size The size of the array. + * @param dtype The type of the elements on the tensor_array. * @param options carries optional attributes values - * @return a new instance of ScatterUpdate + * @return a new instance of TensorArray */ - public ScatterUpdate scatterUpdate(Operand ref, - Operand indices, Operand updates, ScatterUpdate.Options... options) { - return ScatterUpdate.create(scope, ref, indices, updates, options); + public TensorArray tensorArray(Operand size, DataType dtype, + TensorArray.Options... options) { + return TensorArray.create(scope, size, dtype, options); } /** + * Delete the TensorArray from its resource container. + *

        + * This enables the user to close and release the resource in the middle + * of a step/run. * - * @param data type for {@code output()} output - * @param condition - * @param t - * @param e - * @return a new instance of Select + * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). + * @return a new instance of TensorArrayClose */ - public Select select(Operand condition, Operand t, Operand e) { - return Select.create(scope, condition, t, e); + public TensorArrayClose tensorArrayClose(Operand handle) { + return TensorArrayClose.create(scope, handle); } /** - * Computes the difference between two lists of numbers or strings. + * Concat the elements from the TensorArray into value `value`. *

        - * Given a list `x` and a list `y`, this operation returns a list `out` that - * represents all values that are in `x` but not in `y`. The returned list `out` - * is sorted in the same order that the numbers appear in `x` (duplicates are - * preserved). This operation also returns a list `idx` that represents the - * position of each `out` element in `x`. In other words: + * Takes `T` elements of shapes *

        - * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` + *

        {@code
        +   *    (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
        +   *    }
        + * and concatenates them into a Tensor of shape: *

        - * For example, given this input: - *

        {@code
        -   *  x = [1, 2, 3, 4, 5, 6]
        -   *  y = [1, 3, 5]
        -   *  }
        - * This operation would return: - *
        {@code
        -   *  out ==> [2, 4, 6]
        -   *  idx ==> [1, 3, 5]
        -   *  }
        + *
        {@code
        +   *  (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)}
        + * All elements must have the same shape (excepting the first dimension). * - * @param data type for {@code out()} output - * @param data type for {@code idx()} output - * @param x 1-D. Values to keep. - * @param y 1-D. Values to remove. - * @return a new instance of SetDiff1d + * @param data type for {@code value()} output + * @param handle The handle to a TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @param options carries optional attributes values + * @return a new instance of TensorArrayConcat */ - public SetDiff1d setDiff1d(Operand x, Operand y) { - return SetDiff1d.create(scope, x, y); + public TensorArrayConcat tensorArrayConcat(Operand handle, + Operand flowIn, DataType dtype, TensorArrayConcat.Options... options) { + return TensorArrayConcat.create(scope, handle, flowIn, dtype, options); } /** - * Computes the difference between two lists of numbers or strings. - *

        - * Given a list `x` and a list `y`, this operation returns a list `out` that - * represents all values that are in `x` but not in `y`. The returned list `out` - * is sorted in the same order that the numbers appear in `x` (duplicates are - * preserved). This operation also returns a list `idx` that represents the - * position of each `out` element in `x`. In other words: - *

        - * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` + * Gather specific elements from the TensorArray into output `value`. *

        - * For example, given this input: - *

        {@code
        -   *  x = [1, 2, 3, 4, 5, 6]
        -   *  y = [1, 3, 5]
        -   *  }
        - * This operation would return: - *
        {@code
        -   *  out ==> [2, 4, 6]
        -   *  idx ==> [1, 3, 5]
        -   *  }
        + * All elements selected by `indices` must have the same shape. * - * @param data type for {@code out()} output - * @param data type for {@code idx()} output - * @param x 1-D. Values to keep. - * @param y 1-D. Values to remove. - * @param outIdx - * @return a new instance of SetDiff1d + * @param data type for {@code value()} output + * @param handle The handle to a TensorArray. + * @param indices The locations in the TensorArray from which to read tensor elements. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @param options carries optional attributes values + * @return a new instance of TensorArrayGather */ - public SetDiff1d setDiff1d(Operand x, Operand y, - DataType outIdx) { - return SetDiff1d.create(scope, x, y, outIdx); + public TensorArrayGather tensorArrayGather(Operand handle, + Operand indices, Operand flowIn, DataType dtype, + TensorArrayGather.Options... options) { + return TensorArrayGather.create(scope, handle, indices, flowIn, dtype, options); } /** - * Number of unique elements along last dimension of input `set`. + * Creates a TensorArray for storing the gradients of values in the given handle. *

        - * Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, - * and `set_shape`. The last dimension contains values in a set, duplicates are - * allowed but ignored. + * If the given TensorArray gradient already exists, returns a reference to it. *

        - * If `validate_indices` is `True`, this op validates the order and range of `set` - * indices. + * Locks the size of the original TensorArray by disabling its dynamic size flag. + *

        + * *A note about the input flow_in:** + *

        + * The handle flow_in forces the execution of the gradient lookup to occur + * only after certain other operations have occurred. For example, when + * the forward TensorArray is dynamically sized, writes to this TensorArray + * may resize the object. The gradient TensorArray is statically sized based + * on the size of the forward TensorArray when this operation executes. + * Furthermore, the size of the forward TensorArray is frozen by this call. + * As a result, the flow is used to ensure that the call to generate the gradient + * TensorArray only happens after all writes are executed. + *

        + * In the case of dynamically sized TensorArrays, gradient computation should + * only be performed on read operations that have themselves been chained via + * flow to occur only after all writes have executed. That way the final size + * of the forward TensorArray is known when this operation is called. + *

        + * *A note about the source attribute:** + *

        + * TensorArray gradient calls use an accumulator TensorArray object. If + * multiple gradients are calculated and run in the same session, the multiple + * gradient nodes may accidentally flow through the same accumulator TensorArray. + * This double counts and generally breaks the TensorArray gradient flow. + *

        + * The solution is to identify which gradient call this particular + * TensorArray gradient is being called in. This is performed by identifying + * a unique string (e.g. "gradients", "gradients_1", ...) from the input + * gradient Tensor's name. This string is used as a suffix when creating + * the TensorArray gradient object here (the attribute `source`). + *

        + * The attribute `source` is added as a suffix to the forward TensorArray's + * name when performing the creation / lookup, so that each separate gradient + * calculation gets its own TensorArray accumulator. * - * @param setIndices 2D `Tensor`, indices of a `SparseTensor`. - * @param setValues 1D `Tensor`, values of a `SparseTensor`. - * @param setShape 1D `Tensor`, shape of a `SparseTensor`. - * @param options carries optional attributes values - * @return a new instance of SetSize + * @param handle The handle to the forward TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param source The gradient source string, used to decide which gradient TensorArray + * to return. + * @return a new instance of TensorArrayGrad */ - public SetSize setSize(Operand setIndices, Operand setValues, - Operand setShape, SetSize.Options... options) { - return SetSize.create(scope, setIndices, setValues, setShape, options); + public TensorArrayGrad tensorArrayGrad(Operand handle, Operand flowIn, + String source) { + return TensorArrayGrad.create(scope, handle, flowIn, source); } /** - * Returns the shape of a tensor. - *

        - * This operation returns a 1-D integer tensor representing the shape of `input`. + * Creates a TensorArray for storing multiple gradients of values in the given handle. *

        - * For example: - *

        {@code
        -   *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
        -   *  shape(t) ==> [2, 2, 3]
        -   *  }
        + * Similar to TensorArrayGradV3. However it creates an accumulator with an + * expanded shape compared to the input TensorArray whose gradient is being + * computed. This enables multiple gradients for the same TensorArray to be + * calculated using the same accumulator. * - * @param data type for {@code output()} output - * @param input - * @return a new instance of Shape + * @param handle The handle to the forward TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param shapeToPrepend An int32 vector representing a shape. Elements in the gradient accumulator will + * have shape which is this shape_to_prepend value concatenated with shape of the + * elements in the TensorArray corresponding to the input handle. + * @param source The gradient source string, used to decide which gradient TensorArray + * to return. + * @return a new instance of TensorArrayGradWithShape */ - public org.tensorflow.op.core.Shape shape(Operand input) { - return org.tensorflow.op.core.Shape.create(scope, input); + public TensorArrayGradWithShape tensorArrayGradWithShape(Operand handle, + Operand flowIn, Operand shapeToPrepend, String source) { + return TensorArrayGradWithShape.create(scope, handle, flowIn, shapeToPrepend, source); } /** - * Returns the shape of a tensor. - *

        - * This operation returns a 1-D integer tensor representing the shape of `input`. - *

        - * For example: - *

        {@code
        -   *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
        -   *  shape(t) ==> [2, 2, 3]
        -   *  }
        * - * @param data type for {@code output()} output - * @param input - * @param outType - * @return a new instance of Shape + * @param data type for {@code value()} output + * @param handle + * @param flowIn + * @param dtype + * @param options carries optional attributes values + * @return a new instance of TensorArrayPack */ - public org.tensorflow.op.core.Shape shape( - Operand input, DataType outType) { - return org.tensorflow.op.core.Shape.create(scope, input, outType); + public TensorArrayPack tensorArrayPack(Operand handle, + Operand flowIn, DataType dtype, TensorArrayPack.Options... options) { + return TensorArrayPack.create(scope, handle, flowIn, dtype, options); } /** - * Returns shape of tensors. - *

        - * This operation returns N 1-D integer tensors representing shape of `input[i]s`. + * Read an element from the TensorArray into output `value`. * - * @param data type for {@code output()} output - * @param input - * @return a new instance of ShapeN + * @param data type for {@code value()} output + * @param handle The handle to a TensorArray. + * @param index + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @return a new instance of TensorArrayRead */ - public ShapeN shapeN(Iterable> input) { - return ShapeN.create(scope, input); + public TensorArrayRead tensorArrayRead(Operand handle, + Operand index, Operand flowIn, DataType dtype) { + return TensorArrayRead.create(scope, handle, index, flowIn, dtype); } /** - * Returns shape of tensors. + * Scatter the data from the input value into specific TensorArray elements. *

        - * This operation returns N 1-D integer tensors representing shape of `input[i]s`. + * `indices` must be a vector, its length must match the first dim of `value`. * - * @param data type for {@code output()} output - * @param input - * @param outType - * @return a new instance of ShapeN + * @param handle The handle to a TensorArray. + * @param indices The locations at which to write the tensor elements. + * @param value The concatenated tensor to write to the TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArrayScatter */ - public ShapeN shapeN(Iterable> input, - DataType outType) { - return ShapeN.create(scope, input, outType); + public TensorArrayScatter tensorArrayScatter(Operand handle, + Operand indices, Operand value, Operand flowIn) { + return TensorArrayScatter.create(scope, handle, indices, value, flowIn); } /** - * Returns the size of a tensor. - *

        - * This operation returns an integer representing the number of elements in - * `input`. - *

        - * For example: - *

        {@code
        -   *  # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]
        -   *  size(t) ==> 12
        -   *  }
        + * Get the current size of the TensorArray. * - * @param data type for {@code output()} output - * @param input - * @return a new instance of Size + * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArraySize */ - public Size size(Operand input) { - return Size.create(scope, input); + public TensorArraySize tensorArraySize(Operand handle, Operand flowIn) { + return TensorArraySize.create(scope, handle, flowIn); } /** - * Returns the size of a tensor. + * Split the data from the input value into TensorArray elements. *

        - * This operation returns an integer representing the number of elements in - * `input`. + * Assuming that `lengths` takes on values *

        - * For example: - *

        {@code
        -   *  # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]
        -   *  size(t) ==> 12
        -   *  }
        + *
        {@code
        +   *  (n0, n1, ..., n(T-1))}
        + * and that `value` has shape + *

        + *

        {@code
        +   *  (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)}
        + * , + *

        + * this splits values into a TensorArray with T tensors. + *

        + * TensorArray index t will be the subtensor of values with starting position + *

        + *

        {@code
        +   *  (n0 + n1 + ... + n(t-1), 0, 0, ...)}
        + * and having size + *

        + *

        {@code
        +   *  nt x d0 x d1 x ...}
        * - * @param data type for {@code output()} output - * @param input - * @param outType - * @return a new instance of Size + * @param handle The handle to a TensorArray. + * @param value The concatenated tensor to write to the TensorArray. + * @param lengths The vector of lengths, how to split the rows of value into the + * TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArraySplit */ - public Size size(Operand input, DataType outType) { - return Size.create(scope, input, outType); + public TensorArraySplit tensorArraySplit(Operand handle, Operand value, + Operand lengths, Operand flowIn) { + return TensorArraySplit.create(scope, handle, value, lengths, flowIn); } /** - * Parses a text file and creates a batch of examples. * - * @param filename The corpus's text file name. - * @param batchSize The size of produced batch. - * @param options carries optional attributes values - * @return a new instance of Skipgram + * @param handle + * @param value + * @param flowIn + * @return a new instance of TensorArrayUnpack */ - public Skipgram skipgram(String filename, Long batchSize, Skipgram.Options... options) { - return Skipgram.create(scope, filename, batchSize, options); + public TensorArrayUnpack tensorArrayUnpack(Operand handle, + Operand value, Operand flowIn) { + return TensorArrayUnpack.create(scope, handle, value, flowIn); } /** - * Return a slice from 'input'. + * Push an element onto the tensor_array. + * + * @param handle The handle to a TensorArray. + * @param index The position to write to inside the TensorArray. + * @param value The tensor to write to the TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. 
+ * @return a new instance of TensorArrayWrite + */ + public TensorArrayWrite tensorArrayWrite(Operand handle, + Operand index, Operand value, Operand flowIn) { + return TensorArrayWrite.create(scope, handle, index, value, flowIn); + } + + /** + * Concats all tensors in the list along the 0th dimension. *

        - * The output tensor is a tensor with dimensions described by 'size' - * whose values are extracted from 'input' starting at the offsets in - * 'begin'. + * Requires that all tensors have the same shape except the first dimension. *

        - * Requirements: - * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) + * input_handle: The input list. + * element_shape: The shape of the uninitialized elements in the list. If the first + * dimension is not -1, it is assumed that all list elements have the same + * leading dim. + * leading_dims: The list of leading dims of uninitialized list elements. Used if + * the leading dim of input_handle.element_shape or the element_shape input arg + * is not already set. + * tensor: The concated result. + * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. * - * @param data type for {@code output()} output - * @param input - * @param begin begin[i] specifies the offset into the 'i'th dimension of - * 'input' to slice from. - * @param size size[i] specifies the number of elements of the 'i'th dimension - * of 'input' to slice. If size[i] is -1, all remaining elements in dimension - * i are included in the slice (i.e. this is equivalent to setting - * size[i] = input.dim_size(i) - begin[i]). - * @return a new instance of Slice + * @param data type for {@code tensor()} output + * @param inputHandle + * @param elementShape + * @param leadingDims + * @param elementDtype + * @return a new instance of TensorListConcat */ - public Slice slice(Operand input, Operand begin, - Operand size) { - return Slice.create(scope, input, begin, size); + public TensorListConcat tensorListConcat( + Operand inputHandle, Operand elementShape, Operand leadingDims, + DataType elementDtype) { + return TensorListConcat.create(scope, inputHandle, elementShape, leadingDims, elementDtype); } /** - * Returns a copy of the input tensor. 
* - * @param data type for {@code output()} output - * @param input - * @return a new instance of Snapshot + * @param inputA + * @param inputB + * @param elementDtype + * @return a new instance of TensorListConcatLists */ - public Snapshot snapshot(Operand input) { - return Snapshot.create(scope, input); + public TensorListConcatLists tensorListConcatLists(Operand inputA, + Operand inputB, DataType elementDtype) { + return TensorListConcatLists.create(scope, inputA, inputB, elementDtype); } /** - * SpaceToBatch for N-D tensors of type T. + * The shape of the elements of the given list, as a tensor. *

        - * This operation divides "spatial" dimensions `[1, ..., M]` of the input into a - * grid of blocks of shape `block_shape`, and interleaves these blocks with the - * "batch" dimension (0) such that in the output, the spatial dimensions - * `[1, ..., M]` correspond to the position within the grid, and the batch - * dimension combines both the position within a spatial block and the original - * batch position. Prior to division into blocks, the spatial dimensions of the - * input are optionally zero padded according to `paddings`. See below for a - * precise description. + * input_handle: the list + * element_shape: the shape of elements of the list * - * @param data type for {@code output()} output - * @param input N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, - * where spatial_shape has `M` dimensions. - * @param blockShape 1-D with shape `[M]`, all values must be >= 1. - * @param paddings 2-D with shape `[M, 2]`, all values must be >= 0. - * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension - * `i + 1`, which corresponds to spatial dimension `i`. It is required that - * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. + * @param data type for {@code elementShape()} output + * @param inputHandle + * @param shapeType + * @return a new instance of TensorListElementShape + */ + public TensorListElementShape tensorListElementShape( + Operand inputHandle, DataType shapeType) { + return TensorListElementShape.create(scope, inputHandle, shapeType); + } + + /** + * Creates a TensorList which, when stacked, has the value of `tensor`. *

        - * This operation is equivalent to the following steps: + * Each tensor in the result list corresponds to one row of the input tensor. *

        - * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the - * input according to `paddings` to produce `padded` of shape `padded_shape`. + * tensor: The input tensor. + * output_handle: The list. + * + * @param tensor + * @param elementShape + * @return a new instance of TensorListFromTensor + */ + public TensorListFromTensor tensorListFromTensor( + Operand tensor, Operand elementShape) { + return TensorListFromTensor.create(scope, tensor, elementShape); + } + + /** + * Creates a Tensor by indexing into the TensorList. *

        - * 2. Reshape `padded` to `reshaped_padded` of shape: + * Each row in the produced Tensor corresponds to the element in the TensorList + * specified by the given index (see `tf.gather`). *

        - * [batch] + - * [padded_shape[1] / block_shape[0], - * block_shape[0], - * ..., - * padded_shape[M] / block_shape[M-1], - * block_shape[M-1]] + - * remaining_shape + * input_handle: The input tensor list. + * indices: The indices used to index into the list. + * values: The tensor. + * + * @param data type for {@code values()} output + * @param inputHandle + * @param indices + * @param elementShape + * @param elementDtype + * @return a new instance of TensorListGather + */ + public TensorListGather tensorListGather(Operand inputHandle, + Operand indices, Operand elementShape, DataType elementDtype) { + return TensorListGather.create(scope, inputHandle, indices, elementShape, elementDtype); + } + + /** + * + * @param data type for {@code item()} output + * @param inputHandle + * @param index + * @param elementShape + * @param elementDtype + * @return a new instance of TensorListGetItem + */ + public TensorListGetItem tensorListGetItem(Operand inputHandle, + Operand index, Operand elementShape, DataType elementDtype) { + return TensorListGetItem.create(scope, inputHandle, index, elementShape, elementDtype); + } + + /** + * Returns the number of tensors in the input tensor list. *

        - * 3. Permute dimensions of `reshaped_padded` to produce - * `permuted_reshaped_padded` of shape: + * input_handle: the input list + * length: the number of tensors in the list + * + * @param inputHandle + * @return a new instance of TensorListLength + */ + public TensorListLength tensorListLength(Operand inputHandle) { + return TensorListLength.create(scope, inputHandle); + } + + /** + * Returns the last element of the input list as well as a list with all but that element. *

        - * block_shape + - * [batch] + - * [padded_shape[1] / block_shape[0], - * ..., - * padded_shape[M] / block_shape[M-1]] + - * remaining_shape + * Fails if the list is empty. *

        - * 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch - * dimension, producing an output tensor of shape: + * input_handle: the input list + * tensor: the withdrawn last element of the list + * element_dtype: the type of elements in the list + * element_shape: the shape of the output tensor + * + * @param data type for {@code tensor()} output + * @param inputHandle + * @param elementShape + * @param elementDtype + * @return a new instance of TensorListPopBack + */ + public TensorListPopBack tensorListPopBack(Operand inputHandle, + Operand elementShape, DataType elementDtype) { + return TensorListPopBack.create(scope, inputHandle, elementShape, elementDtype); + } + + /** + * Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`. *

        - * [batch * prod(block_shape)] + - * [padded_shape[1] / block_shape[0], - * ..., - * padded_shape[M] / block_shape[M-1]] + - * remaining_shape - *

        - * Some examples: - *

        - * (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and - * `paddings = [[0, 0], [0, 0]]`: - *

        {@code
        -   *  x = [[[[1], [2]], [[3], [4]]]]
        -   *  }
        - * The output tensor has shape `[4, 1, 1, 1]` and value: - *
        {@code
        -   *  [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
        -   *  }
        - * (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and - * `paddings = [[0, 0], [0, 0]]`: - *
        {@code
        -   *  x = [[[[1, 2, 3], [4, 5, 6]],
        -   *        [[7, 8, 9], [10, 11, 12]]]]
        -   *  }
        - * The output tensor has shape `[4, 1, 1, 3]` and value: - *
        {@code
        -   *  [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
        -   *  }
        - * (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and - * `paddings = [[0, 0], [0, 0]]`: - *
        {@code
        -   *  x = [[[[1],   [2],  [3],  [4]],
        -   *        [[5],   [6],  [7],  [8]],
        -   *        [[9],  [10], [11],  [12]],
        -   *        [[13], [14], [15],  [16]]]]
        -   *  }
        - * The output tensor has shape `[4, 2, 2, 1]` and value: - *
        {@code
        -   *  x = [[[[1], [3]], [[9], [11]]],
        -   *       [[[2], [4]], [[10], [12]]],
        -   *       [[[5], [7]], [[13], [15]]],
        -   *       [[[6], [8]], [[14], [16]]]]
        -   *  }
        - * (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and - * paddings = `[[0, 0], [2, 0]]`: - *
        {@code
        -   *  x = [[[[1],   [2],  [3],  [4]],
        -   *        [[5],   [6],  [7],  [8]]],
        -   *       [[[9],  [10], [11],  [12]],
        -   *        [[13], [14], [15],  [16]]]]
        -   *  }
        - * The output tensor has shape `[8, 1, 3, 1]` and value: - *
        {@code
        -   *  x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
        -   *       [[[0], [2], [4]]], [[[0], [10], [12]]],
        -   *       [[[0], [5], [7]]], [[[0], [13], [15]]],
        -   *       [[[0], [6], [8]]], [[[0], [14], [16]]]]
        -   *  }
        - * Among others, this operation is useful for reducing atrous convolution into - * regular convolution. - * @return a new instance of SpaceToBatchNd + * tensor: The tensor to put on the list. + * input_handle: The old list. + * output_handle: A list with the elements of the old list followed by tensor. + * element_dtype: the type of elements in the list. + * element_shape: a shape compatible with that of elements in the list. + * + * @param inputHandle + * @param tensor + * @return a new instance of TensorListPushBack */ - public SpaceToBatchNd spaceToBatchNd( - Operand input, Operand blockShape, Operand paddings) { - return SpaceToBatchNd.create(scope, input, blockShape, paddings); + public TensorListPushBack tensorListPushBack(Operand inputHandle, + Operand tensor) { + return TensorListPushBack.create(scope, inputHandle, tensor); } /** - * Splits a tensor into `num_split` tensors along one dimension. * - * @param data type for {@code output()} output - * @param axis 0-D. The dimension along which to split. Must be in the range - * `[-rank(value), rank(value))`. - * @param value The tensor to split. - * @param numSplit The number of ways to split. Must evenly divide - * `value.shape[split_dim]`. - * @return a new instance of Split + * @param inputHandles + * @param tensor + * @return a new instance of TensorListPushBackBatch */ - public Split split(Operand axis, Operand value, Long numSplit) { - return Split.create(scope, axis, value, numSplit); + public TensorListPushBackBatch tensorListPushBackBatch(Operand inputHandles, + Operand tensor) { + return TensorListPushBackBatch.create(scope, inputHandles, tensor); } /** - * Splits a tensor into `num_split` tensors along one dimension. + * List of the given size with empty elements. + *

        + * element_shape: the shape of the future elements of the list + * num_elements: the number of elements to reserve + * handle: the output list + * element_dtype: the desired type of elements in the list. * - * @param data type for {@code output()} output - * @param value The tensor to split. - * @param sizeSplits list containing the sizes of each output tensor along the split - * dimension. Must sum to the dimension of value along split_dim. - * Can contain one -1 indicating that dimension is to be inferred. - * @param axis 0-D. The dimension along which to split. Must be in the range - * `[-rank(value), rank(value))`. - * @param numSplit - * @return a new instance of SplitV + * @param elementShape + * @param numElements + * @param elementDtype + * @return a new instance of TensorListReserve */ - public SplitV splitV(Operand value, - Operand sizeSplits, Operand axis, Long numSplit) { - return SplitV.create(scope, value, sizeSplits, axis, numSplit); + public TensorListReserve tensorListReserve( + Operand elementShape, Operand numElements, DataType elementDtype) { + return TensorListReserve.create(scope, elementShape, numElements, elementDtype); } /** - * Removes dimensions of size 1 from the shape of a tensor. - *

        - * Given a tensor `input`, this operation returns a tensor of the same type with - * all dimensions of size 1 removed. If you don't want to remove all size 1 - * dimensions, you can remove specific size 1 dimensions by specifying - * `axis`. + * Resizes the list. *

        - * For example: - *

        {@code
        -   *  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
        -   *  shape(squeeze(t)) ==> [2, 3]
        -   *  }
        - * Or, to remove specific size 1 dimensions: - *
        {@code
        -   *  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
        -   *  shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
        -   *  }
        * - * @param data type for {@code output()} output - * @param input The `input` to squeeze. - * @param options carries optional attributes values - * @return a new instance of Squeeze + * input_handle: the input list + * size: size of the output list + * + * @param inputHandle + * @param size + * @return a new instance of TensorListResize */ - public Squeeze squeeze(Operand input, Squeeze.Options... options) { - return Squeeze.create(scope, input, options); + public TensorListResize tensorListResize(Operand inputHandle, Operand size) { + return TensorListResize.create(scope, inputHandle, size); } /** - * Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. - *

        - * Packs the `N` tensors in `values` into a tensor with rank one higher than each - * tensor in `values`, by packing them along the `axis` dimension. - * Given a list of tensors of shape `(A, B, C)`; + * Creates a TensorList by indexing into a Tensor. *

        - * if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. - * if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. - * Etc. + * Each member of the TensorList corresponds to one row of the input tensor, + * specified by the given index (see `tf.gather`). *

        - * For example: - *

        {@code
        -   *  # 'x' is [1, 4]
        -   *  # 'y' is [2, 5]
        -   *  # 'z' is [3, 6]
        -   *  pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
        -   *  pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
        -   *  }
        - * This is the opposite of `unpack`. + * tensor: The input tensor. + * indices: The indices used to index into the list. + * element_shape: The shape of the elements in the list (can be less specified than + * the shape of the tensor). + * num_elements: The size of the output list. Must be large enough to accommodate + * the largest index in indices. If -1, the list is just large enough to include + * the largest index in indices. + * output_handle: The TensorList. * - * @param data type for {@code output()} output - * @param values Must be of same shape and type. - * @param options carries optional attributes values - * @return a new instance of Stack + * @param tensor + * @param indices + * @param elementShape + * @param numElements + * @return a new instance of TensorListScatter */ - public Stack stack(Iterable> values, Stack.Options... options) { - return Stack.create(scope, values, options); + public TensorListScatter tensorListScatter(Operand tensor, + Operand indices, Operand elementShape, Operand numElements) { + return TensorListScatter.create(scope, tensor, indices, elementShape, numElements); } /** - * Stage values similar to a lightweight Enqueue. + * Scatters tensor at indices in an input list. *

        - * The basic functionality of this Op is similar to a queue with many - * fewer capabilities and options. This Op is optimized for performance. + * Each member of the TensorList corresponds to one row of the input tensor, + * specified by the given index (see `tf.gather`). + *

        + * input_handle: The list to scatter into. + * tensor: The input tensor. + * indices: The indices used to index into the list. + * output_handle: The TensorList. * - * @param values a list of tensors - * dtypes A list of data types that inserted values should adhere to. - * @param options carries optional attributes values - * @return a new instance of Stage + * @param inputHandle + * @param tensor + * @param indices + * @return a new instance of TensorListScatterIntoExistingList */ - public Stage stage(Iterable> values, Stage.Options... options) { - return Stage.create(scope, values, options); + public TensorListScatterIntoExistingList tensorListScatterIntoExistingList( + Operand inputHandle, Operand tensor, Operand indices) { + return TensorListScatterIntoExistingList.create(scope, inputHandle, tensor, indices); } /** - * Op removes all elements in the underlying container. * - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of StageClear + * @param inputHandle + * @param index + * @param item + * @return a new instance of TensorListSetItem */ - public StageClear stageClear(List> dtypes, StageClear.Options... options) { - return StageClear.create(scope, dtypes, options); + public TensorListSetItem tensorListSetItem(Operand inputHandle, + Operand index, Operand item) { + return TensorListSetItem.create(scope, inputHandle, index, item); } /** - * Op peeks at the values at the specified index. If the + * Splits a tensor into a list. *

        - * underlying container does not contain sufficient elements - * this op will block until it does. This Op is optimized for - * performance. + * list[i] corresponds to lengths[i] tensors from the input tensor. + * The tensor must have rank at least 1 and contain exactly sum(lengths) elements. + *

        + * tensor: The input tensor. + * element_shape: A shape compatible with that of elements in the tensor. + * lengths: Vector of sizes of the 0th dimension of tensors in the list. + * output_handle: The list. * - * @param index - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of StagePeek + * @param tensor + * @param elementShape + * @param lengths + * @return a new instance of TensorListSplit */ - public StagePeek stagePeek(Operand index, List> dtypes, - StagePeek.Options... options) { - return StagePeek.create(scope, index, dtypes, options); + public TensorListSplit tensorListSplit(Operand tensor, + Operand elementShape, Operand lengths) { + return TensorListSplit.create(scope, tensor, elementShape, lengths); } /** - * Op returns the number of elements in the underlying container. - * - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of StageSize - */ - public StageSize stageSize(List> dtypes, StageSize.Options... options) { - return StageSize.create(scope, dtypes, options); - } - - /** - * Stops gradient computation. - *

        - * When executed in a graph, this op outputs its input tensor as-is. + * Stacks all tensors in the list. *

        - * When building ops to compute gradients, this op prevents the contribution of - * its inputs to be taken into account. Normally, the gradient generator adds ops - * to a graph to compute the derivatives of a specified 'loss' by recursively - * finding out inputs that contributed to its computation. If you insert this op - * in the graph it inputs are masked from the gradient generator. They are not - * taken into account for computing gradients. + * Requires that all tensors have the same shape. *

        - * This is useful any time you want to compute a value with TensorFlow but need - * to pretend that the value was a constant. Some examples include: - *

          - *
        • - * The EM algorithm where the M-step should not involve backpropagation - * through the output of the E-step. - *
        • - *
        • - * Contrastive divergence training of Boltzmann machines where, when - * differentiating the energy function, the training must not backpropagate - * through the graph that generated the samples from the model. - *
        • - *
        • - * Adversarial training, where no backprop should happen through the adversarial - * example generation process. + * input_handle: the input list + * tensor: the gathered result + * num_elements: optional. If not -1, the number of elements in the list. * - * @param data type for {@code output()} output - * @param input - * @return a new instance of StopGradient + * @param data type for {@code tensor()} output + * @param inputHandle + * @param elementShape + * @param elementDtype + * @param options carries optional attributes values + * @return a new instance of TensorListStack */ - public StopGradient stopGradient(Operand input) { - return StopGradient.create(scope, input); + public TensorListStack tensorListStack(Operand inputHandle, + Operand elementShape, DataType elementDtype, TensorListStack.Options... options) { + return TensorListStack.create(scope, inputHandle, elementShape, elementDtype, options); } /** - * Return a strided slice from `input`. + * Adds sparse `updates` to an existing tensor according to `indices`. *

          - * Note, most python users will want to use the Python `Tensor.__getitem__` - * or `Variable.__getitem__` rather than this op directly. + * This operation creates a new tensor by adding sparse `updates` to the passed + * in `tensor`. + * This operation is very similar to `tf.scatter_nd_add`, except that the updates + * are added onto an existing tensor (as opposed to a variable). If the memory + * for the existing tensor cannot be re-used, a copy is made and updated. *

          - * The goal of this op is to produce a new tensor with a subset of - * the elements from the `n` dimensional `input` tensor. The subset is chosen using - * a sequence of `m` sparse range specifications encoded into the arguments - * of this function. Note, in some cases - * `m` could be equal to `n`, but this need not be the case. Each - * range specification entry can be one of the following: + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: *

          - * - An ellipsis (...). Ellipses are used to imply zero or more - * dimensions of full-dimension selection and are produced using - * `ellipsis_mask`. For example, `foo[...]` is the identity slice. + * indices.shape[-1] <= shape.rank *

          - * - A new axis. This is used to insert a new shape=1 dimension and is - * produced using `new_axis_mask`. For example, `foo[:, ...]` where - * `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape *

          - * - A range `begin:end:stride`. This is used to specify how much to choose from - * a given dimension. `stride` can be any integer but 0. `begin` is an integer - * which represents the index of the first value to select while `end` represents - * the index of the last value to select. The number of values selected in each - * dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. - * `begin` and `end` can be negative where `-1` is the last element, `-2` is - * the second to last. `begin_mask` controls whether to replace the explicitly - * given `begin` with an implicit effective value of `0` if `stride > 0` and - * `-1` if `stride < 0`. `end_mask` is analogous but produces the number - * required to create the largest open interval. For example, given a shape - * `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do - * not assume this is equivalent to `foo[0:-1]` which has an effective `begin` - * and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the - * first dimension of a tensor while dropping the last two (in the original - * order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. + * indices.shape[:-1] + shape[indices.shape[-1]:] *

          - * - A single index. This is used to keep only elements that have a given - * index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a - * shape `(6,)` tensor. This is encoded in `begin` and `end` and - * `shrink_axis_mask`. + * The simplest form of tensor_scatter_add is to add individual elements to a + * tensor by index. For example, say we want to add 4 elements in a rank-1 + * tensor with 8 elements. *

          - * Each conceptual range specification is encoded in the op's argument. This - * encoding is best understand by considering a non-trivial example. In - * particular, - * `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as + * In Python, this scatter add operation would look like this: *

          {@code
          -   *  begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
          -   *  end = [2, 4, x, x, -3, x]
          -   *  strides = [1, 1, x, x, -1, 1]
          -   *  begin_mask = 1<<4 | 1 << 5 = 48
          -   *  end_mask = 1<<5 = 32
          -   *  ellipsis_mask = 1<<3 = 8
          -   *  new_axis_mask = 1<<2 4
          -   *  shrink_axis_mask = 1<<0
          +   *      indices = tf.constant([[4], [3], [1], [7]])
          +   *      updates = tf.constant([9, 10, 11, 12])
          +   *      tensor = tf.ones([8], dtype=tf.int32)
          +   *      updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
          +   *      print(updated)
              *  }
          - * In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of - * the slice becomes (2, 1, 5, 5, 2, 5). - * Let us walk step by step through each argument specification. - *

          - * 1. The first argument in the example slice is turned into `begin = 1` and - * `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we - * also set the appropriate bit in `shrink_axis_mask`. - *

          - * 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have - * zero bits contributed. + * The resulting tensor would look like this: *

          - * 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 - * dimension in the final shape. Dummy values are contributed to begin, - * end and stride, while the new_axis_mask bit is set. + * [1, 12, 1, 11, 10, 1, 1, 13] *

          - * 4. `...` grab the full ranges from as many dimensions as needed to - * fully specify a slice for every dimension of the input shape. + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. *

          - * 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated - * with a dimension that has shape `s` is converted to a positive index - * `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion - * is done internally so begin, end and strides receive x, -3, and -1. - * The appropriate begin_mask bit is set to indicate the start range is the - * full range (ignoring the x). + * In Python, this scatter add operation would look like this: + *

          {@code
          +   *      indices = tf.constant([[0], [2]])
          +   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
          +   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
          +   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
          +   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
          +   *      tensor = tf.ones([4, 4, 4],dtype=tf.int32)
          +   *      updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
          +   *      print(updated)
          +   *  }
          + * The resulting tensor would look like this: *

          - * 6. `:` indicates that the entire contents of the corresponding dimension - * is selected. This is equivalent to `::` or `0::1`. begin, end, and strides - * receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and - * `end_mask` are also set. + * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] *

          - * Requirements: - * `0 != strides[i] for i in [0, m)` - * `ellipsis_mask must be a power of two (only one ellipsis)` + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. * * @param data type for {@code output()} output - * @param input - * @param begin `begin[k]` specifies the offset into the `k`th range specification. - * The exact dimension this corresponds to will be determined by context. - * Out-of-bounds values will be silently clamped. If the `k`th bit of - * `begin_mask` then `begin[k]` is ignored and the full range of the - * appropriate dimension is used instead. Negative values causes indexing - * to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`. - * @param end `end[i]` is like `begin` with the exception that `end_mask` is - * used to determine full ranges. - * @param strides `strides[i]` specifies the increment in the `i`th specification - * after extracting a given element. Negative indices will reverse - * the original order. Out or range values are - * clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0` - * @param options carries optional attributes values - * @return a new instance of StridedSlice + * @param tensor Tensor to copy/update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @return a new instance of TensorScatterNdAdd */ - public StridedSlice stridedSlice(Operand input, - Operand begin, Operand end, Operand strides, StridedSlice.Options... options) { - return StridedSlice.create(scope, input, begin, end, strides, options); + public TensorScatterNdAdd tensorScatterNdAdd( + Operand tensor, Operand indices, Operand updates) { + return TensorScatterNdAdd.create(scope, tensor, indices, updates); } /** - * Assign `value` to the sliced l-value reference of `ref`. + * Subtracts sparse `updates` from an existing tensor according to `indices`. *

          - * The values of `value` are assigned to the positions in the variable - * `ref` that are selected by the slice parameters. The slice parameters - * `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. + * This operation creates a new tensor by subtracting sparse `updates` from the + * passed in `tensor`. + * This operation is very similar to `tf.scatter_nd_sub`, except that the updates + * are subtracted from an existing tensor (as opposed to a variable). If the memory + * for the existing tensor cannot be re-used, a copy is made and updated. *

          - * NOTE this op currently does not support broadcasting and so `value`'s - * shape must be exactly the shape produced by the slice of `ref`. - * - * @param data type for {@code outputRef()} output - * @param ref - * @param begin - * @param end - * @param strides - * @param value - * @param options carries optional attributes values - * @return a new instance of StridedSliceAssign - */ - public StridedSliceAssign stridedSliceAssign( - Operand ref, Operand begin, Operand end, Operand strides, Operand value, - StridedSliceAssign.Options... options) { - return StridedSliceAssign.create(scope, ref, begin, end, strides, value, options); - } - - /** - * Returns the gradient of `StridedSlice`. + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: *

          - * Since `StridedSlice` cuts out pieces of its `input` which is size - * `shape`, its gradient will have the same shape (which is passed here - * as `shape`). The gradient will be zero in any element that the slice - * does not select. + * indices.shape[-1] <= shape.rank *

          - * Arguments are the same as StridedSliceGrad with the exception that - * `dy` is the input gradient to be propagated and `shape` is the - * shape of `StridedSlice`'s `input`. - * - * @param data type for {@code output()} output - * @param shape - * @param begin - * @param end - * @param strides - * @param dy - * @param options carries optional attributes values - * @return a new instance of StridedSliceGrad - */ - public StridedSliceGrad stridedSliceGrad(Operand shape, - Operand begin, Operand end, Operand strides, Operand dy, - StridedSliceGrad.Options... options) { - return StridedSliceGrad.create(scope, shape, begin, end, strides, dy, options); - } - - /** - * Computes the sum of elements across dimensions of a tensor. + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape *

          - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are - * retained with length 1. + * indices.shape[:-1] + shape[indices.shape[-1]:] + *

          + * The simplest form of tensor_scatter_sub is to subtract individual elements + * from a tensor by index. For example, say we want to insert 4 scattered elements + * in a rank-1 tensor with 8 elements. + *

          + * In Python, this scatter subtract operation would look like this: + *

          {@code
          +   *      indices = tf.constant([[4], [3], [1], [7]])
          +   *      updates = tf.constant([9, 10, 11, 12])
          +   *      tensor = tf.ones([8], dtype=tf.int32)
          +   *      updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
          +   *      print(updated)
          +   *  }
          + * The resulting tensor would look like this: + *

          + * [1, -10, 1, -9, -8, 1, 1, -11] + *

          + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. + *

          + * In Python, this scatter add operation would look like this: + *

          {@code
          +   *      indices = tf.constant([[0], [2]])
          +   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
          +   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
          +   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
          +   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
          +   *      tensor = tf.ones([4, 4, 4],dtype=tf.int32)
          +   *      updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
          +   *      print(updated)
          +   *  }
          + * The resulting tensor would look like this: + *

          + * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + *

          + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. * * @param data type for {@code output()} output - * @param input The tensor to reduce. - * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values - * @return a new instance of Sum + * @param tensor Tensor to copy/update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @return a new instance of TensorScatterNdSub */ - public Sum sum(Operand input, Operand axis, - Sum.Options... options) { - return Sum.create(scope, input, axis, options); + public TensorScatterNdSub tensorScatterNdSub( + Operand tensor, Operand indices, Operand updates) { + return TensorScatterNdSub.create(scope, tensor, indices, updates); } /** - * Forwards `data` to the output port determined by `pred`. + * Scatter `updates` into an existing tensor according to `indices`. *

          - * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, - * the data goes to `output_false`. + * This operation creates a new tensor by applying sparse `updates` to the passed + * in `tensor`. + * This operation is very similar to `tf.scatter_nd`, except that the updates are + * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory + * for the existing tensor cannot be re-used, a copy is made and updated. *

          - * See also `RefSwitch` and `Merge`. + * If `indices` contains duplicates, then their updates are accumulated (summed). + *

          + * WARNING: The order in which updates are applied is nondeterministic, so the + * output will be nondeterministic if `indices` contains duplicates -- because + * of some numerical approximation issues, numbers summed in different order + * may yield different results. + *

          + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + *

          + * indices.shape[-1] <= shape.rank + *

          + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape + *

          + * indices.shape[:-1] + shape[indices.shape[-1]:] + *

          + * The simplest form of scatter is to insert individual elements in a tensor by + * index. For example, say we want to insert 4 scattered elements in a rank-1 + * tensor with 8 elements. + *

          + *

          + * + *
          + *

          + * In Python, this scatter operation would look like this: + *

          {@code
          +   *      indices = tf.constant([[4], [3], [1], [7]])
          +   *      updates = tf.constant([9, 10, 11, 12])
          +   *      tensor = tf.ones([8], dtype=tf.int32)
          +   *      updated = tf.tensor_scatter_nd_update(tensor, indices, updates)
          +   *      print(updated)
          +   *  }
          + * The resulting tensor would look like this: + *

          + * [1, 11, 1, 10, 9, 1, 1, 12] + *

          + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. + *

          + * In Python, this scatter operation would look like this: + *

          {@code
          +   *      indices = tf.constant([[0], [2]])
          +   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
          +   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
          +   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
          +   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
          +   *      tensor = tf.ones([4, 4, 4],dtype=tf.int32)
          +   *      updated = tf.tensor_scatter_nd_update(tensor, indices, updates)
          +   *      print(updated)
          +   *  }
          + * The resulting tensor would look like this: + *

          + * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + *

          + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. * - * @param data type for {@code outputFalse()} output - * @param data The tensor to be forwarded to the appropriate output. - * @param pred A scalar that specifies which output port will receive data. - * @return a new instance of SwitchCond + * @param data type for {@code output()} output + * @param tensor Tensor to copy/update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @return a new instance of TensorScatterNdUpdate */ - public SwitchCond switchCond(Operand data, Operand pred) { - return SwitchCond.create(scope, data, pred); + public TensorScatterNdUpdate tensorScatterNdUpdate( + Operand tensor, Operand indices, Operand updates) { + return TensorScatterNdUpdate.create(scope, tensor, indices, updates); } /** - * Returns a tensor that may be mutated, but only persists within a single step. - *

          - * This is an experimental op for internal use only and it is possible to use this - * op in unsafe ways. DO NOT USE unless you fully understand the risks. - *

          - * It is the caller's responsibility to ensure that 'ref' is eventually passed to a - * matching 'DestroyTemporaryVariable' op after all other uses have completed. + * Assign `value` to the sliced l-value reference of `input`. *

          - * Outputs a ref to the tensor state so it may be read or modified. + * The values of `value` are assigned to the positions in the tensor `input` that + * are selected by the slice parameters. The slice parameters `begin` `end` + * `strides` etc. work exactly as in `StridedSlice`. *

          - * E.g. - * var = state_ops._temporary_variable([1, 2], types.float_) - * var_name = var.op.name - * var = state_ops.assign(var, [[4.0, 5.0]]) - * var = state_ops.assign_add(var, [[6.0, 7.0]]) - * final = state_ops._destroy_temporary_variable(var, var_name=var_name) + * NOTE this op currently does not support broadcasting and so `value`'s shape + * must be exactly the shape produced by the slice of `input`. * - * @param data type for {@code ref()} output - * @param shape The shape of the variable tensor. - * @param dtype The type of elements in the variable tensor. + * @param data type for {@code output()} output + * @param input + * @param begin + * @param end + * @param strides + * @param value * @param options carries optional attributes values - * @return a new instance of TemporaryVariable + * @return a new instance of TensorStridedSliceUpdate */ - public TemporaryVariable temporaryVariable(Shape shape, DataType dtype, - TemporaryVariable.Options... options) { - return TemporaryVariable.create(scope, shape, dtype, options); + public TensorStridedSliceUpdate tensorStridedSliceUpdate( + Operand input, Operand begin, Operand end, Operand strides, Operand value, + TensorStridedSliceUpdate.Options... options) { + return TensorStridedSliceUpdate.create(scope, input, begin, end, strides, value, options); } /** - * An array of Tensors of given size. + * Constructs a tensor by tiling a given tensor. *

          - * Write data via Write and read via Read or Pack. + * This operation creates a new tensor by replicating `input` `multiples` times. + * The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, + * and the values of `input` are replicated `multiples[i]` times along the 'i'th + * dimension. For example, tiling `[a b c d]` by `[2]` produces + * `[a b c d a b c d]`. + *

          + * >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32) + * >>> b = tf.constant([1,2], tf.int32) + * >>> tf.tile(a, b) + * + * >>> c = tf.constant([2,1], tf.int32) + * >>> tf.tile(a, c) + * + * >>> d = tf.constant([2,2], tf.int32) + * >>> tf.tile(a, d) + * * - * @param size The size of the array. - * @param dtype The type of the elements on the tensor_array. - * @param options carries optional attributes values - * @return a new instance of TensorArray + * @param data type for {@code output()} output + * @param input 1-D or higher. + * @param multiples 1-D. Length must be the same as the number of dimensions in `input` + * @return a new instance of Tile */ - public TensorArray tensorArray(Operand size, DataType dtype, - TensorArray.Options... options) { - return TensorArray.create(scope, size, dtype, options); + public Tile tile(Operand input, Operand multiples) { + return Tile.create(scope, input, multiples); } /** - * Delete the TensorArray from its resource container. + * Provides the time since epoch in seconds. *

          - * This enables the user to close and release the resource in the middle - * of a step/run. + * Returns the timestamp as a `float64` for seconds since the Unix epoch. + *

          + * Note: the timestamp is computed when the op is executed, not when it is added + * to the graph. * - * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). - * @return a new instance of TensorArrayClose + * @return a new instance of Timestamp */ - public TensorArrayClose tensorArrayClose(Operand handle) { - return TensorArrayClose.create(scope, handle); + public Timestamp timestamp() { + return Timestamp.create(scope); } /** - * Concat the elements from the TensorArray into value `value`. + * Perform batches of RPC requests. *

          - * Takes `T` elements of shapes + * This op asynchronously performs either a single RPC request, or a batch + * of requests. RPC requests are defined by three main parameters: *

          - *

          {@code
          -   *    (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
          -   *    }
          - * and concatenates them into a Tensor of shape: + * - `address` (the host+port or BNS address of the request) + * - `method` (the method name for the request) + * - `request` (the serialized proto string, or vector of strings, + * of the RPC request argument). *

          - *

          {@code
          -   *  (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)}
          - * All elements must have the same shape (excepting the first dimension). + * For example, if you have an RPC service running on port localhost:2345, + * and its interface is configured with the following proto declaration: + *
          {@code
          +   *  service MyService {
          +   *    rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
          +   *    }
          +   *  };
          +   *  }
          + * then call this op with arguments: + *
          {@code
          +   *  address = "localhost:2345"
          +   *  method = "MyService/MyMethod"
          +   *  }
          + * The `request` tensor is a string tensor representing serialized `MyRequestProto` + * strings; and the output string tensor `response` will have the same shape + * and contain (upon successful completion) corresponding serialized + * `MyResponseProto` strings. + *

          + * For example, to send a single, empty, `MyRequestProto`, call + * this op with `request = ""`. To send 5 parallel empty requests, + * call this op with `request = ["", "", "", "", ""]`. + *

          + * More generally, one can create a batch of `MyRequestProto` serialized protos + * from regular batched tensors using the `encode_proto` op, and convert + * the response `MyResponseProto` serialized protos to batched tensors + * using the `decode_proto` op. + *

          + * NOTE Working with serialized proto strings is faster than instantiating + * actual proto objects in memory, so no performance degradation is expected + * compared to writing custom kernels for this workflow. + *

          + * Unlike the standard `Rpc` op, if the connection fails or the remote worker + * returns an error status, this op does not reraise the exception. + * Instead, the `status_code` and `status_message` entry for the corresponding RPC + * call is set with the error returned from the RPC call. The `response` tensor + * will contain valid response values for those minibatch entries whose RPCs did + * not fail; the rest of the entries will have empty strings. * - * @param data type for {@code value()} output - * @param handle The handle to a TensorArray. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @param dtype The type of the elem that is returned. + * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `method` and `request`. + * @param method `0-D` or `1-D`. The method address on the RPC server. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `address` and `request`. + * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `address` and `method`. * @param options carries optional attributes values - * @return a new instance of TensorArrayConcat + * @return a new instance of TryRpc */ - public TensorArrayConcat tensorArrayConcat(Operand handle, - Operand flowIn, DataType dtype, TensorArrayConcat.Options... options) { - return TensorArrayConcat.create(scope, handle, flowIn, dtype, options); + public TryRpc tryRpc(Operand address, Operand method, Operand request, + TryRpc.Options... options) { + return TryRpc.create(scope, address, method, request, options); } /** - * Gather specific elements from the TensorArray into output `value`. 
+ * Reverses the operation of Batch for a single output Tensor. *

          - * All elements selected by `indices` must have the same shape. + * An instance of Unbatch either receives an empty batched_tensor, in which case it + * asynchronously waits until the values become available from a concurrently + * running instance of Unbatch with the same container and shared_name, or receives + * a non-empty batched_tensor in which case it finalizes all other concurrently + * running instances and outputs its own element from the batch. + *

          + * batched_tensor: The possibly transformed output of Batch. The size of the first + * dimension should remain unchanged by the transformations for the operation to + * work. + * batch_index: The matching batch_index obtained from Batch. + * id: The id scalar emitted by Batch. + * unbatched_tensor: The Tensor corresponding to this execution. + * timeout_micros: Maximum amount of time (in microseconds) to wait to receive the + * batched input tensor associated with a given invocation of the op. + * container: Container to control resource sharing. + * shared_name: Instances of Unbatch with the same container and shared_name are + * assumed to possibly belong to the same batch. If left empty, the op name will + * be used as the shared name. * - * @param data type for {@code value()} output - * @param handle The handle to a TensorArray. - * @param indices The locations in the TensorArray from which to read tensor elements. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @param dtype The type of the elem that is returned. + * @param data type for {@code unbatchedTensor()} output + * @param batchedTensor + * @param batchIndex + * @param id + * @param timeoutMicros * @param options carries optional attributes values - * @return a new instance of TensorArrayGather + * @return a new instance of Unbatch */ - public TensorArrayGather tensorArrayGather(Operand handle, - Operand indices, Operand flowIn, DataType dtype, - TensorArrayGather.Options... options) { - return TensorArrayGather.create(scope, handle, indices, flowIn, dtype, options); + public Unbatch unbatch(Operand batchedTensor, Operand batchIndex, + Operand id, Long timeoutMicros, Unbatch.Options... options) { + return Unbatch.create(scope, batchedTensor, batchIndex, id, timeoutMicros, options); } /** - * Creates a TensorArray for storing the gradients of values in the given handle. - *

          - * If the given TensorArray gradient already exists, returns a reference to it. - *

          - * Locks the size of the original TensorArray by disabling its dynamic size flag. - *

          - * *A note about the input flow_in:** - *

          - * The handle flow_in forces the execution of the gradient lookup to occur - * only after certain other operations have occurred. For example, when - * the forward TensorArray is dynamically sized, writes to this TensorArray - * may resize the object. The gradient TensorArray is statically sized based - * on the size of the forward TensorArray when this operation executes. - * Furthermore, the size of the forward TensorArray is frozen by this call. - * As a result, the flow is used to ensure that the call to generate the gradient - * TensorArray only happens after all writes are executed. - *

          - * In the case of dynamically sized TensorArrays, gradient computation should - * only be performed on read operations that have themselves been chained via - * flow to occur only after all writes have executed. That way the final size - * of the forward TensorArray is known when this operation is called. - *

          - * *A note about the source attribute:** - *

          - * TensorArray gradient calls use an accumulator TensorArray object. If - * multiple gradients are calculated and run in the same session, the multiple - * gradient nodes may accidentally flow through the same accumulator TensorArray. - * This double counts and generally breaks the TensorArray gradient flow. + * Gradient of Unbatch. *

          - * The solution is to identify which gradient call this particular - * TensorArray gradient is being called in. This is performed by identifying - * a unique string (e.g. "gradients", "gradients_1", ...) from the input - * gradient Tensor's name. This string is used as a suffix when creating - * the TensorArray gradient object here (the attribute `source`). + * Acts like Batch but using the given batch_index index of batching things as they + * become available. This ensures that the gradients are propagated back in the + * same session which did the forward pass. *

          - * The attribute `source` is added as a suffix to the forward TensorArray's - * name when performing the creation / lookup, so that each separate gradient - * calculation gets its own TensorArray accumulator. + * original_input: The input to the Unbatch operation this is the gradient of. + * batch_index: The batch_index given to the Unbatch operation this is the gradient + * of. + * grad: The downstream gradient. + * id: The id scalar emitted by Batch. + * batched_grad: The return value, either an empty tensor or the batched gradient. + * container: Container to control resource sharing. + * shared_name: Instances of UnbatchGrad with the same container and shared_name + * are assumed to possibly belong to the same batch. If left empty, the op name + * will be used as the shared name. * - * @param handle The handle to the forward TensorArray. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @param source The gradient source string, used to decide which gradient TensorArray - * to return. - * @return a new instance of TensorArrayGrad + * @param data type for {@code batchedGrad()} output + * @param originalInput + * @param batchIndex + * @param grad + * @param id + * @param options carries optional attributes values + * @return a new instance of UnbatchGrad */ - public TensorArrayGrad tensorArrayGrad(Operand handle, Operand flowIn, - String source) { - return TensorArrayGrad.create(scope, handle, flowIn, source); + public UnbatchGrad unbatchGrad(Operand originalInput, + Operand batchIndex, Operand grad, Operand id, + UnbatchGrad.Options... options) { + return UnbatchGrad.create(scope, originalInput, batchIndex, grad, id, options); } /** - * Creates a TensorArray for storing multiple gradients of values in the given handle. + * Finds unique elements along an axis of a tensor. *

          - * Similar to TensorArrayGradV3. However it creates an accumulator with an - * expanded shape compared to the input TensorArray whose gradient is being - * computed. This enables multiple gradients for the same TensorArray to be - * calculated using the same accumulator. - * - * @param handle The handle to the forward TensorArray. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @param shapeToPrepend An int32 vector representing a shape. Elements in the gradient accumulator will - * have shape which is this shape_to_prepend value concatenated with shape of the - * elements in the TensorArray corresponding to the input handle. - * @param source The gradient source string, used to decide which gradient TensorArray - * to return. - * @return a new instance of TensorArrayGradWithShape - */ - public TensorArrayGradWithShape tensorArrayGradWithShape(Operand handle, - Operand flowIn, Operand shapeToPrepend, String source) { - return TensorArrayGradWithShape.create(scope, handle, flowIn, shapeToPrepend, source); - } - - /** + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + *

          + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + *

          + * For example: + *

          {@code
          +   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          +   *  y, idx = unique(x)
          +   *  y ==> [1, 2, 4, 7, 8]
          +   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 0`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx = unique(x, axis=0)
          +   *  y ==> [[1, 0, 0],
          +   *         [2, 0, 0]]
          +   *  idx ==> [0, 0, 1]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 1`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx = unique(x, axis=1)
          +   *  y ==> [[1, 0],
          +   *         [1, 0],
          +   *         [2, 0]]
          +   *  idx ==> [0, 1, 1]
          +   *  }
          * - * @param data type for {@code value()} output - * @param handle - * @param flowIn - * @param dtype - * @param options carries optional attributes values - * @return a new instance of TensorArrayPack + * @param data type for {@code y()} output + * @param data type for {@code idx()} output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @return a new instance of Unique */ - public TensorArrayPack tensorArrayPack(Operand handle, - Operand flowIn, DataType dtype, TensorArrayPack.Options... options) { - return TensorArrayPack.create(scope, handle, flowIn, dtype, options); + public Unique unique(Operand x, + Operand axis) { + return Unique.create(scope, x, axis); } /** - * Read an element from the TensorArray into output `value`. - * - * @param data type for {@code value()} output - * @param handle The handle to a TensorArray. - * @param index - * @param flowIn A float scalar that enforces proper chaining of operations. - * @param dtype The type of the elem that is returned. - * @return a new instance of TensorArrayRead - */ - public TensorArrayRead tensorArrayRead(Operand handle, - Operand index, Operand flowIn, DataType dtype) { - return TensorArrayRead.create(scope, handle, index, flowIn, dtype); - } - - /** - * Scatter the data from the input value into specific TensorArray elements. + * Finds unique elements along an axis of a tensor. *

          - * `indices` must be a vector, its length must match the first dim of `value`. + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + *

          + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + *

          + * For example: + *

          {@code
          +   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          +   *  y, idx = unique(x)
          +   *  y ==> [1, 2, 4, 7, 8]
          +   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 0`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx = unique(x, axis=0)
          +   *  y ==> [[1, 0, 0],
          +   *         [2, 0, 0]]
          +   *  idx ==> [0, 0, 1]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 1`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx = unique(x, axis=1)
          +   *  y ==> [[1, 0],
          +   *         [1, 0],
          +   *         [2, 0]]
          +   *  idx ==> [0, 1, 1]
          +   *  }
          * - * @param handle The handle to a TensorArray. - * @param indices The locations at which to write the tensor elements. - * @param value The concatenated tensor to write to the TensorArray. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @return a new instance of TensorArrayScatter + * @param data type for {@code y()} output + * @param data type for {@code idx()} output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @param outIdx + * @return a new instance of Unique */ - public TensorArrayScatter tensorArrayScatter(Operand handle, - Operand indices, Operand value, Operand flowIn) { - return TensorArrayScatter.create(scope, handle, indices, value, flowIn); + public Unique unique(Operand x, + Operand axis, DataType outIdx) { + return Unique.create(scope, x, axis, outIdx); } /** - * Get the current size of the TensorArray. + * Finds unique elements along an axis of a tensor. + *

          + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + *

          + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + *

          + * For example: + *

          {@code
          +   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          +   *  y, idx, count = unique_with_counts(x)
          +   *  y ==> [1, 2, 4, 7, 8]
          +   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          +   *  count ==> [2, 1, 3, 1, 2]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 0`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx, count = unique_with_counts(x, axis=0)
          +   *  y ==> [[1, 0, 0],
          +   *         [2, 0, 0]]
          +   *  idx ==> [0, 0, 1]
          +   *  count ==> [2, 1]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 1`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx, count = unique_with_counts(x, axis=1)
          +   *  y ==> [[1, 0],
          +   *         [1, 0],
          +   *         [2, 0]]
          +   *  idx ==> [0, 1, 1]
          +   *  count ==> [1, 2]
          +   *  }
          * - * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). - * @param flowIn A float scalar that enforces proper chaining of operations. - * @return a new instance of TensorArraySize + * @param data type for {@code y()} output + * @param data type for {@code idx()} output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @return a new instance of UniqueWithCounts */ - public TensorArraySize tensorArraySize(Operand handle, Operand flowIn) { - return TensorArraySize.create(scope, handle, flowIn); + public UniqueWithCounts uniqueWithCounts( + Operand x, Operand axis) { + return UniqueWithCounts.create(scope, x, axis); } /** - * Split the data from the input value into TensorArray elements. - *

          - * Assuming that `lengths` takes on values - *

          - *

          {@code
          -   *  (n0, n1, ..., n(T-1))}
          - * and that `value` has shape - *

          - *

          {@code
          -   *  (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)}
          - * , - *

          - * this splits values into a TensorArray with T tensors. + * Finds unique elements along an axis of a tensor. *

          - * TensorArray index t will be the subtensor of values with starting position + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: *

          - *

          {@code
          -   *  (n0 + n1 + ... + n(t-1), 0, 0, ...)}
          - * and having size + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` *

          - *

          {@code
          -   *  nt x d0 x d1 x ...}
          + * For example: + *
          {@code
          +   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          +   *  y, idx, count = unique_with_counts(x)
          +   *  y ==> [1, 2, 4, 7, 8]
          +   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          +   *  count ==> [2, 1, 3, 1, 2]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 0`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx, count = unique_with_counts(x, axis=0)
          +   *  y ==> [[1, 0, 0],
          +   *         [2, 0, 0]]
          +   *  idx ==> [0, 0, 1]
          +   *  count ==> [2, 1]
          +   *  }
          + * For an `2-D` tensor `x` with `axis = 1`: + *
          {@code
          +   *  # tensor 'x' is [[1, 0, 0],
          +   *  #                [1, 0, 0],
          +   *  #                [2, 0, 0]]
          +   *  y, idx, count = unique_with_counts(x, axis=1)
          +   *  y ==> [[1, 0],
          +   *         [1, 0],
          +   *         [2, 0]]
          +   *  idx ==> [0, 1, 1]
          +   *  count ==> [1, 2]
          +   *  }
          * - * @param handle The handle to a TensorArray. - * @param value The concatenated tensor to write to the TensorArray. - * @param lengths The vector of lengths, how to split the rows of value into the - * TensorArray. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @return a new instance of TensorArraySplit + * @param data type for {@code y()} output + * @param data type for {@code idx()} output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @param outIdx + * @return a new instance of UniqueWithCounts */ - public TensorArraySplit tensorArraySplit(Operand handle, Operand value, - Operand lengths, Operand flowIn) { - return TensorArraySplit.create(scope, handle, value, lengths, flowIn); + public UniqueWithCounts uniqueWithCounts( + Operand x, Operand axis, DataType outIdx) { + return UniqueWithCounts.create(scope, x, axis, outIdx); } /** + * Converts an array of flat indices into a tuple of coordinate arrays. + *

          * - * @param handle - * @param value - * @param flowIn - * @return a new instance of TensorArrayUnpack - */ - public TensorArrayUnpack tensorArrayUnpack(Operand handle, - Operand value, Operand flowIn) { - return TensorArrayUnpack.create(scope, handle, value, flowIn); - } - - /** - * Push an element onto the tensor_array. + * Example: + *

          {@code
          +   *  y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])
          +   *  # 'dims' represent a hypothetical (3, 3) tensor of indices:
          +   *  # [[0, 1, *2*],
          +   *  #  [3, 4, *5*],
          +   *  #  [6, *7*, 8]]
          +   *  # For each entry from 'indices', this operation returns
          +   *  # its coordinates (marked with '*'), such as
          +   *  # 2 ==> (0, 2)
          +   *  # 5 ==> (1, 2)
          +   *  # 7 ==> (2, 1)
          +   *  y ==> [[0, 1, 2], [2, 2, 1]]
          +   *  }
          * - * @param handle The handle to a TensorArray. - * @param index The position to write to inside the TensorArray. - * @param value The tensor to write to the TensorArray. - * @param flowIn A float scalar that enforces proper chaining of operations. - * @return a new instance of TensorArrayWrite + * @compatibility(numpy) Equivalent to np.unravel_index + * @end_compatibility + * @param data type for {@code output()} output + * @param indices An 0-D or 1-D `int` Tensor whose elements are indices into the + * flattened version of an array of dimensions dims. + * @param dims An 1-D `int` Tensor. The shape of the array to use for unraveling + * indices. + * @return a new instance of UnravelIndex */ - public TensorArrayWrite tensorArrayWrite(Operand handle, - Operand index, Operand value, Operand flowIn) { - return TensorArrayWrite.create(scope, handle, index, value, flowIn); + public UnravelIndex unravelIndex(Operand indices, Operand dims) { + return UnravelIndex.create(scope, indices, dims); } /** - * Concats all tensors in the list along the 0th dimension. + * Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. *

          - * Requires that all tensors have the same shape except the first dimension. + * Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. + * For example, given a tensor of shape `(A, B, C, D)`; *

          - * input_handle: The input list. - * element_shape: The shape of the uninitialized elements in the list. If the first - * dimension is not -1, it is assumed that all list elements have the same - * leading dim. - * leading_dims: The list of leading dims of uninitialized list elements. Used if - * the leading dim of input_handle.element_shape or the element_shape input arg - * is not already set. - * tensor: The concated result. - * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. + * If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` + * and each tensor in `output` will have shape `(B, C, D)`. (Note that the + * dimension unpacked along is gone, unlike `split`). + *

          + * If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` + * and each tensor in `output` will have shape `(A, C, D)`. + * Etc. + *

          + * This is the opposite of `pack`. * - * @param data type for {@code tensor()} output - * @param inputHandle - * @param elementShape - * @param leadingDims - * @param elementDtype - * @return a new instance of TensorListConcat + * @param data type for {@code output()} output + * @param value 1-D or higher, with `axis` dimension size equal to `num`. + * @param num + * @param options carries optional attributes values + * @return a new instance of Unstack */ - public TensorListConcat tensorListConcat( - Operand inputHandle, Operand elementShape, Operand leadingDims, - DataType elementDtype) { - return TensorListConcat.create(scope, inputHandle, elementShape, leadingDims, elementDtype); + public Unstack unstack(Operand value, Long num, + Unstack.Options... options) { + return Unstack.create(scope, value, num, options); } /** + * Op is similar to a lightweight Dequeue. + *

          + * The basic functionality is similar to dequeue with many fewer + * capabilities and options. This Op is optimized for performance. * - * @param inputA - * @param inputB - * @param elementDtype - * @return a new instance of TensorListConcatLists + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of Unstage */ - public TensorListConcatLists tensorListConcatLists(Operand inputA, - Operand inputB, DataType elementDtype) { - return TensorListConcatLists.create(scope, inputA, inputB, elementDtype); + public Unstage unstage(List> dtypes, Unstage.Options... options) { + return Unstage.create(scope, dtypes, options); } /** - * The shape of the elements of the given list, as a tensor. - *

          - * input_handle: the list - * element_shape: the shape of elements of the list + * Creates a rank-1 constant of {@code long} elements. * - * @param data type for {@code elementShape()} output - * @param inputHandle - * @param shapeType - * @return a new instance of TensorListElementShape + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant */ - public TensorListElementShape tensorListElementShape( - Operand inputHandle, DataType shapeType) { - return TensorListElementShape.create(scope, inputHandle, shapeType); + public Constant val(long[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a TensorList which, when stacked, has the value of `tensor`. - *

          - * Each tensor in the result list corresponds to one row of the input tensor. - *

          - * tensor: The input tensor. - * output_handle: The list. + * Creates a rank-6 constant of {@code double} elements. * - * @param tensor - * @param elementShape - * @return a new instance of TensorListFromTensor + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant */ - public TensorListFromTensor tensorListFromTensor( - Operand tensor, Operand elementShape) { - return TensorListFromTensor.create(scope, tensor, elementShape); + public Constant val(double[][][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a Tensor by indexing into the TensorList. - *

          - * Each row in the produced Tensor corresponds to the element in the TensorList - * specified by the given index (see `tf.gather`). - *

          - * input_handle: The input tensor list. - * indices: The indices used to index into the list. - * values: The tensor. + * Creates a constant of {@code byte} elements that is a copy of a given n-dimensional array. * - * @param data type for {@code values()} output - * @param inputHandle - * @param indices - * @param elementShape - * @param elementDtype - * @return a new instance of TensorListGather + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code byte} elements. + * @return a byte constant */ - public TensorListGather tensorListGather(Operand inputHandle, - Operand indices, Operand elementShape, DataType elementDtype) { - return TensorListGather.create(scope, inputHandle, indices, elementShape, elementDtype); + public Constant val(ByteNdArray data) { + return Constant.tensorOf(scope, data); } /** + * Creates a rank-4 constant of {@code boolean} elements. * - * @param data type for {@code item()} output - * @param inputHandle - * @param index - * @param elementShape - * @param elementDtype - * @return a new instance of TensorListGetItem + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public TensorListGetItem tensorListGetItem(Operand inputHandle, - Operand index, Operand elementShape, DataType elementDtype) { - return TensorListGetItem.create(scope, inputHandle, index, elementShape, elementDtype); + public Constant val(boolean[][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Returns the number of tensors in the input tensor list. - *

          - * input_handle: the input list - * length: the number of tensors in the list + * Creates a constant of {@code float} elements that is a copy of a given n-dimensional array. * - * @param inputHandle - * @return a new instance of TensorListLength + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code float} elements. + * @return a float constant */ - public TensorListLength tensorListLength(Operand inputHandle) { - return TensorListLength.create(scope, inputHandle); + public Constant val(FloatNdArray data) { + return Constant.tensorOf(scope, data); } /** - * Returns the last element of the input list as well as a list with all but that element. - *

          - * Fails if the list is empty. - *

          - * input_handle: the input list - * tensor: the withdrawn last element of the list - * element_dtype: the type of elements in the list - * element_shape: the shape of the output tensor + * Creates a rank-6 constant of {@code boolean} elements. * - * @param data type for {@code tensor()} output - * @param inputHandle - * @param elementShape - * @param elementDtype - * @return a new instance of TensorListPopBack + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public TensorListPopBack tensorListPopBack(Operand inputHandle, - Operand elementShape, DataType elementDtype) { - return TensorListPopBack.create(scope, inputHandle, elementShape, elementDtype); + public Constant val(boolean[][][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`. - *

          - * tensor: The tensor to put on the list. - * input_handle: The old list. - * output_handle: A list with the elements of the old list followed by tensor. - * element_dtype: the type of elements in the list. - * element_shape: a shape compatible with that of elements in the list. + * Creates a rank-2 constant of {@code long} elements. * - * @param inputHandle - * @param tensor - * @return a new instance of TensorListPushBack + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant */ - public TensorListPushBack tensorListPushBack(Operand inputHandle, - Operand tensor) { - return TensorListPushBack.create(scope, inputHandle, tensor); + public Constant val(long[][] data) { + return Constant.tensorOf(scope, data); } /** + * Creates a constant of {@code double} elements that is a copy of a given n-dimensional array. * - * @param inputHandles - * @param tensor - * @return a new instance of TensorListPushBackBatch + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code double} elements. + * @return a double constant */ - public TensorListPushBackBatch tensorListPushBackBatch(Operand inputHandles, - Operand tensor) { - return TensorListPushBackBatch.create(scope, inputHandles, tensor); + public Constant val(DoubleNdArray data) { + return Constant.tensorOf(scope, data); } /** - * List of the given size with empty elements. - *

          - * element_shape: the shape of the future elements of the list - * num_elements: the number of elements to reserve - * handle: the output list - * element_dtype: the desired type of elements in the list. + * Creates a rank-3 constant of {@code boolean} elements. * - * @param elementShape - * @param numElements - * @param elementDtype - * @return a new instance of TensorListReserve + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public TensorListReserve tensorListReserve( - Operand elementShape, Operand numElements, DataType elementDtype) { - return TensorListReserve.create(scope, elementShape, numElements, elementDtype); + public Constant val(boolean[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Resizes the list. - *

          - * - * input_handle: the input list - * size: size of the output list + * Creates a constant containing a single {@code float} element. * - * @param inputHandle - * @param size - * @return a new instance of TensorListResize + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a float constant */ - public TensorListResize tensorListResize(Operand inputHandle, Operand size) { - return TensorListResize.create(scope, inputHandle, size); + public Constant val(float data) { + return Constant.scalarOf(scope, data); } /** - * Creates a TensorList by indexing into a Tensor. - *

          - * Each member of the TensorList corresponds to one row of the input tensor, - * specified by the given index (see `tf.gather`). - *

          - * tensor: The input tensor. - * indices: The indices used to index into the list. - * element_shape: The shape of the elements in the list (can be less specified than - * the shape of the tensor). - * num_elements: The size of the output list. Must be large enough to accommodate - * the largest index in indices. If -1, the list is just large enough to include - * the largest index in indices. - * output_handle: The TensorList. + * Creates a rank-6 constant of {@code float} elements. * - * @param tensor - * @param indices - * @param elementShape - * @param numElements - * @return a new instance of TensorListScatter + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant */ - public TensorListScatter tensorListScatter(Operand tensor, - Operand indices, Operand elementShape, Operand numElements) { - return TensorListScatter.create(scope, tensor, indices, elementShape, numElements); + public Constant val(float[][][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Scatters tensor at indices in an input list. - *

          - * Each member of the TensorList corresponds to one row of the input tensor, - * specified by the given index (see `tf.gather`). - *

          - * input_handle: The list to scatter into. - * tensor: The input tensor. - * indices: The indices used to index into the list. - * output_handle: The TensorList. + * Creates a rank-5 constant of {@code byte} elements. * - * @param inputHandle - * @param tensor - * @param indices - * @return a new instance of TensorListScatterIntoExistingList + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - public TensorListScatterIntoExistingList tensorListScatterIntoExistingList( - Operand inputHandle, Operand tensor, Operand indices) { - return TensorListScatterIntoExistingList.create(scope, inputHandle, tensor, indices); + public Constant val(byte[][][][][] data) { + return Constant.tensorOf(scope, data); } /** + * Creates a rank-4 constant of {@code float} elements. * - * @param inputHandle - * @param index - * @param item - * @return a new instance of TensorListSetItem - */ - public TensorListSetItem tensorListSetItem(Operand inputHandle, - Operand index, Operand item) { - return TensorListSetItem.create(scope, inputHandle, index, item); + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant + */ + public Constant val(float[][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Splits a tensor into a list. - *

          - * list[i] corresponds to lengths[i] tensors from the input tensor. - * The tensor must have rank at least 1 and contain exactly sum(lengths) elements. - *

          - * tensor: The input tensor. - * element_shape: A shape compatible with that of elements in the tensor. - * lengths: Vector of sizes of the 0th dimension of tensors in the list. - * output_handle: The list. + * Creates a rank-5 constant of {@code double} elements. * - * @param tensor - * @param elementShape - * @param lengths - * @return a new instance of TensorListSplit + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant */ - public TensorListSplit tensorListSplit(Operand tensor, - Operand elementShape, Operand lengths) { - return TensorListSplit.create(scope, tensor, elementShape, lengths); + public Constant val(double[][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Stacks all tensors in the list. - *

          - * Requires that all tensors have the same shape. - *

          - * input_handle: the input list - * tensor: the gathered result - * num_elements: optional. If not -1, the number of elements in the list. + * Creates a constant containing a single {@code int} element. * - * @param data type for {@code tensor()} output - * @param inputHandle - * @param elementShape - * @param elementDtype - * @param options carries optional attributes values - * @return a new instance of TensorListStack + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return an integer constant */ - public TensorListStack tensorListStack(Operand inputHandle, - Operand elementShape, DataType elementDtype, TensorListStack.Options... options) { - return TensorListStack.create(scope, inputHandle, elementShape, elementDtype, options); + public Constant val(int data) { + return Constant.scalarOf(scope, data); } /** - * Adds sparse `updates` to an existing tensor according to `indices`. - *

          - * This operation creates a new tensor by adding sparse `updates` to the passed - * in `tensor`. - * This operation is very similar to `tf.scatter_nd_add`, except that the updates - * are added onto an existing tensor (as opposed to a variable). If the memory - * for the existing tensor cannot be re-used, a copy is made and updated. - *

          - * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. The last dimension of `indices` can be at most the rank of `shape`: - *

          - * indices.shape[-1] <= shape.rank - *

          - * The last dimension of `indices` corresponds to indices into elements - * (if `indices.shape[-1] = shape.rank`) or slices - * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - * `shape`. `updates` is a tensor with shape - *

          - * indices.shape[:-1] + shape[indices.shape[-1]:] - *

          - * The simplest form of tensor_scatter_add is to add individual elements to a - * tensor by index. For example, say we want to add 4 elements in a rank-1 - * tensor with 8 elements. - *

          - * In Python, this scatter add operation would look like this: - *

          {@code
          -   *      indices = tf.constant([[4], [3], [1], [7]])
          -   *      updates = tf.constant([9, 10, 11, 12])
          -   *      tensor = tf.ones([8], dtype=tf.int32)
          -   *      updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
          -   *      print(updated)
          -   *  }
          - * The resulting tensor would look like this: - *

          - * [1, 12, 1, 11, 10, 1, 1, 13] - *

          - * We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. - *

          - * In Python, this scatter add operation would look like this: - *

          {@code
          -   *      indices = tf.constant([[0], [2]])
          -   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
          -   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
          -   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
          -   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
          -   *      tensor = tf.ones([4, 4, 4],dtype=tf.int32)
          -   *      updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
          -   *      print(updated)
          -   *  }
          - * The resulting tensor would look like this: - *

          - * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - *

          - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. + * Creates a rank-3 constant of {@code float} elements. * - * @param data type for {@code output()} output - * @param tensor Tensor to copy/update. - * @param indices Index tensor. - * @param updates Updates to scatter into output. - * @return a new instance of TensorScatterNdAdd + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant */ - public TensorScatterNdAdd tensorScatterNdAdd( - Operand tensor, Operand indices, Operand updates) { - return TensorScatterNdAdd.create(scope, tensor, indices, updates); + public Constant val(float[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Subtracts sparse `updates` from an existing tensor according to `indices`. - *

          - * This operation creates a new tensor by subtracting sparse `updates` from the - * passed in `tensor`. - * This operation is very similar to `tf.scatter_nd_sub`, except that the updates - * are subtracted from an existing tensor (as opposed to a variable). If the memory - * for the existing tensor cannot be re-used, a copy is made and updated. - *

          - * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. The last dimension of `indices` can be at most the rank of `shape`: - *

          - * indices.shape[-1] <= shape.rank - *

          - * The last dimension of `indices` corresponds to indices into elements - * (if `indices.shape[-1] = shape.rank`) or slices - * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - * `shape`. `updates` is a tensor with shape - *

          - * indices.shape[:-1] + shape[indices.shape[-1]:] - *

          - * The simplest form of tensor_scatter_sub is to subtract individual elements - * from a tensor by index. For example, say we want to insert 4 scattered elements - * in a rank-1 tensor with 8 elements. - *

          - * In Python, this scatter subtract operation would look like this: - *

          {@code
          -   *      indices = tf.constant([[4], [3], [1], [7]])
          -   *      updates = tf.constant([9, 10, 11, 12])
          -   *      tensor = tf.ones([8], dtype=tf.int32)
          -   *      updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
          -   *      print(updated)
          -   *  }
          - * The resulting tensor would look like this: - *

          - * [1, -10, 1, -9, -8, 1, 1, -11] - *

          - * We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. - *

          - * In Python, this scatter add operation would look like this: - *

          {@code
          -   *      indices = tf.constant([[0], [2]])
          -   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
          -   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
          -   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
          -   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
          -   *      tensor = tf.ones([4, 4, 4],dtype=tf.int32)
          -   *      updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
          -   *      print(updated)
          -   *  }
          - * The resulting tensor would look like this: - *

          - * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - *

          - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. + * Creates a rank-1 constant of {@code byte} elements. * - * @param data type for {@code output()} output - * @param tensor Tensor to copy/update. - * @param indices Index tensor. - * @param updates Updates to scatter into output. - * @return a new instance of TensorScatterNdSub + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - public TensorScatterNdSub tensorScatterNdSub( - Operand tensor, Operand indices, Operand updates) { - return TensorScatterNdSub.create(scope, tensor, indices, updates); + public Constant val(byte[] data) { + return Constant.vectorOf(scope, data); } /** - * Scatter `updates` into an existing tensor according to `indices`. - *

          - * This operation creates a new tensor by applying sparse `updates` to the passed - * in `tensor`. - * This operation is very similar to `tf.scatter_nd`, except that the updates are - * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory - * for the existing tensor cannot be re-used, a copy is made and updated. - *

          - * If `indices` contains duplicates, then their updates are accumulated (summed). - *

          - * WARNING: The order in which updates are applied is nondeterministic, so the - * output will be nondeterministic if `indices` contains duplicates -- because - * of some numerical approximation issues, numbers summed in different order - * may yield different results. - *

          - * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. The last dimension of `indices` can be at most the rank of `shape`: - *

          - * indices.shape[-1] <= shape.rank - *

          - * The last dimension of `indices` corresponds to indices into elements - * (if `indices.shape[-1] = shape.rank`) or slices - * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - * `shape`. `updates` is a tensor with shape - *

          - * indices.shape[:-1] + shape[indices.shape[-1]:] - *

          - * The simplest form of scatter is to insert individual elements in a tensor by - * index. For example, say we want to insert 4 scattered elements in a rank-1 - * tensor with 8 elements. - *

          - *

          - * - *
          - *

          - * In Python, this scatter operation would look like this: - *

          {@code
          -   *      indices = tf.constant([[4], [3], [1], [7]])
          -   *      updates = tf.constant([9, 10, 11, 12])
          -   *      tensor = tf.ones([8], dtype=tf.int32)
          -   *      updated = tf.tensor_scatter_nd_update(tensor, indices, updates)
          -   *      print(updated)
          -   *  }
          - * The resulting tensor would look like this: - *

          - * [1, 11, 1, 10, 9, 1, 1, 12] - *

          - * We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. - *

          - * In Python, this scatter operation would look like this: - *

          {@code
          -   *      indices = tf.constant([[0], [2]])
          -   *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
          -   *                              [7, 7, 7, 7], [8, 8, 8, 8]],
          -   *                             [[5, 5, 5, 5], [6, 6, 6, 6],
          -   *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
          -   *      tensor = tf.ones([4, 4, 4],dtype=tf.int32)
          -   *      updated = tf.tensor_scatter_nd_update(tensor, indices, updates)
          -   *      print(updated)
          -   *  }
          - * The resulting tensor would look like this: - *

          - * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - *

          - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. + * Creates a rank-4 constant of {@code double} elements. * - * @param data type for {@code output()} output - * @param tensor Tensor to copy/update. - * @param indices Index tensor. - * @param updates Updates to scatter into output. - * @return a new instance of TensorScatterNdUpdate + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant + */ + public Constant val(double[][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a constant of {@code boolean} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code boolean} elements. + * @return a boolean constant + */ + public Constant val(BooleanNdArray data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-5 constant of {@code boolean} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant + */ + public Constant val(boolean[][][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-3 constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. 
+ * @return an integer constant + */ + public Constant val(int[][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a {@code String} constant using the default, UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data The string to put into the new constant. + * @return a string constant + */ + public Constant val(String data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a constant containing a single {@code double} element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a double constant + */ + public Constant val(double data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a rank-1 constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return an integer constant + */ + public Constant val(int[] data) { + return Constant.vectorOf(scope, data); + } + + /** + * Creates a rank-5 constant of {@code long} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant + */ + public Constant val(long[][][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-2 constant of {@code byte} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. 
+ * @return a byte constant + */ + public Constant val(byte[][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-6 constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return an integer constant + */ + public Constant val(int[][][][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a constant of {@code long} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code long} elements. + * @return a long constant + */ + public Constant val(LongNdArray data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-2 constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return an integer constant + */ + public Constant val(int[][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a constant containing a single {@code boolean} element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a boolean constant + */ + public Constant val(boolean data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a rank-1 constant of {@code double} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. 
+ * @return a double constant + */ + public Constant val(double[] data) { + return Constant.vectorOf(scope, data); + } + + /** + * Creates a rank-1 constant of {@code float} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant + */ + public Constant val(float[] data) { + return Constant.vectorOf(scope, data); + } + + /** + * Creates a rank-5 constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return an integer constant + */ + public Constant val(int[][][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-4 constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return an integer constant + */ + public Constant val(int[][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-3 constant of {@code byte} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant + */ + public Constant val(byte[][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-4 constant of {@code long} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. 
The dimensions of the + * new constant will match those of the array. + * @return a long constant + */ + public Constant val(long[][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-2 constant of {@code double} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant + */ + public Constant val(double[][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a constant containing a single {@code long} element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a long constant + */ + public Constant val(long data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a constant containing a single {@code byte} element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a byte constant + */ + public Constant val(byte data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a rank-3 constant of {@code long} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant */ - public TensorScatterNdUpdate tensorScatterNdUpdate( - Operand tensor, Operand indices, Operand updates) { - return TensorScatterNdUpdate.create(scope, tensor, indices, updates); + public Constant val(long[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Assign `value` to the sliced l-value reference of `input`. - *

          - * The values of `value` are assigned to the positions in the tensor `input` that - * are selected by the slice parameters. The slice parameters `begin` `end` - * `strides` etc. work exactly as in `StridedSlice`. - *

          - * NOTE this op currently does not support broadcasting and so `value`'s shape - * must be exactly the shape produced by the slice of `input`. + * Creates a rank-5 constant of {@code float} elements. * - * @param data type for {@code output()} output - * @param input - * @param begin - * @param end - * @param strides - * @param value - * @param options carries optional attributes values - * @return a new instance of TensorStridedSliceUpdate + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant */ - public TensorStridedSliceUpdate tensorStridedSliceUpdate( - Operand input, Operand begin, Operand end, Operand strides, Operand value, - TensorStridedSliceUpdate.Options... options) { - return TensorStridedSliceUpdate.create(scope, input, begin, end, strides, value, options); + public Constant val(float[][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Constructs a tensor by tiling a given tensor. - *

          - * This operation creates a new tensor by replicating `input` `multiples` times. - * The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, - * and the values of `input` are replicated `multiples[i]` times along the 'i'th - * dimension. For example, tiling `[a b c d]` by `[2]` produces - * `[a b c d a b c d]`. - *

          - * >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32) - * >>> b = tf.constant([1,2], tf.int32) - * >>> tf.tile(a, b) - * - * >>> c = tf.constant([2,1], tf.int32) - * >>> tf.tile(a, c) - * - * >>> d = tf.constant([2,2], tf.int32) - * >>> tf.tile(a, d) - * + * Creates a rank-6 constant of {@code byte} elements. * - * @param data type for {@code output()} output - * @param input 1-D or higher. - * @param multiples 1-D. Length must be the same as the number of dimensions in `input` - * @return a new instance of Tile + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - public Tile tile(Operand input, Operand multiples) { - return Tile.create(scope, input, multiples); + public Constant val(byte[][][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Provides the time since epoch in seconds. - *

          - * Returns the timestamp as a `float64` for seconds since the Unix epoch. - *

          - * Note: the timestamp is computed when the op is executed, not when it is added - * to the graph. + * Creates a rank-3 constant of {@code double} elements. * - * @return a new instance of Timestamp + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant */ - public Timestamp timestamp() { - return Timestamp.create(scope); + public Constant val(double[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Perform batches of RPC requests. - *

          - * This op asynchronously performs either a single RPC request, or a batch - * of requests. RPC requests are defined by three main parameters: - *

          - * - `address` (the host+port or BNS address of the request) - * - `method` (the method name for the request) - * - `request` (the serialized proto string, or vector of strings, - * of the RPC request argument). - *

          - * For example, if you have an RPC service running on port localhost:2345, - * and its interface is configured with the following proto declaration: - *

          {@code
          -   *  service MyService {
          -   *    rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
          -   *    }
          -   *  };
          -   *  }
          - * then call this op with arguments: - *
          {@code
          -   *  address = "localhost:2345"
          -   *  method = "MyService/MyMethod"
          -   *  }
          - * The `request` tensor is a string tensor representing serialized `MyRequestProto` - * strings; and the output string tensor `response` will have the same shape - * and contain (upon successful completion) corresponding serialized - * `MyResponseProto` strings. - *

          - * For example, to send a single, empty, `MyRequestProto`, call - * this op with `request = ""`. To send 5 parallel empty requests, - * call this op with `request = ["", "", "", "", ""]`. - *

          - * More generally, one can create a batch of `MyRequestProto` serialized protos - * from regular batched tensors using the `encode_proto` op, and convert - * the response `MyResponseProto` serialized protos to batched tensors - * using the `decode_proto` op. - *

          - * NOTE Working with serialized proto strings is faster than instantiating - * actual proto objects in memory, so no performance degradation is expected - * compared to writing custom kernels for this workflow. - *

          - * Unlike the standard `Rpc` op, if the connection fails or the remote worker - * returns an error status, this op does not reraise the exception. - * Instead, the `status_code` and `status_message` entry for the corresponding RPC - * call is set with the error returned from the RPC call. The `response` tensor - * will contain valid response values for those minibatch entries whose RPCs did - * not fail; the rest of the entries will have empty strings. + * Creates a rank-2 constant of {@code boolean} elements. * - * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `method` and `request`. - * @param method `0-D` or `1-D`. The method address on the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `request`. - * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `method`. - * @param options carries optional attributes values - * @return a new instance of TryRpc + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public TryRpc tryRpc(Operand address, Operand method, Operand request, - TryRpc.Options... options) { - return TryRpc.create(scope, address, method, request, options); + public Constant val(boolean[][] data) { + return Constant.tensorOf(scope, data); } /** - * Reverses the operation of Batch for a single output Tensor. - *

          - * An instance of Unbatch either receives an empty batched_tensor, in which case it - * asynchronously waits until the values become available from a concurrently - * running instance of Unbatch with the same container and shared_name, or receives - * a non-empty batched_tensor in which case it finalizes all other concurrently - * running instances and outputs its own element from the batch. - *

          - * batched_tensor: The possibly transformed output of Batch. The size of the first - * dimension should remain unchanged by the transformations for the operation to - * work. - * batch_index: The matching batch_index obtained from Batch. - * id: The id scalar emitted by Batch. - * unbatched_tensor: The Tensor corresponding to this execution. - * timeout_micros: Maximum amount of time (in microseconds) to wait to receive the - * batched input tensor associated with a given invocation of the op. - * container: Container to control resource sharing. - * shared_name: Instances of Unbatch with the same container and shared_name are - * assumed to possibly belong to the same batch. If left empty, the op name will - * be used as the shared name. + * Creates a rank-2 constant of {@code float} elements. * - * @param data type for {@code unbatchedTensor()} output - * @param batchedTensor - * @param batchIndex - * @param id - * @param timeoutMicros - * @param options carries optional attributes values - * @return a new instance of Unbatch + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant */ - public Unbatch unbatch(Operand batchedTensor, Operand batchIndex, - Operand id, Long timeoutMicros, Unbatch.Options... options) { - return Unbatch.create(scope, batchedTensor, batchIndex, id, timeoutMicros, options); + public Constant val(float[][] data) { + return Constant.tensorOf(scope, data); } /** - * Gradient of Unbatch. - *

          - * Acts like Batch but using the given batch_index index of batching things as they - * become available. This ensures that the gradients are propagated back in the - * same session which did the forward pass. - *

          - * original_input: The input to the Unbatch operation this is the gradient of. - * batch_index: The batch_index given to the Unbatch operation this is the gradient - * of. - * grad: The downstream gradient. - * id: The id scalar emitted by Batch. - * batched_grad: The return value, either an empty tensor or the batched gradient. - * container: Container to control resource sharing. - * shared_name: Instances of UnbatchGrad with the same container and shared_name - * are assumed to possibly belong to the same batch. If left empty, the op name - * will be used as the shared name. + * Creates a rank-1 constant of {@code boolean} elements. * - * @param data type for {@code batchedGrad()} output - * @param originalInput - * @param batchIndex - * @param grad - * @param id - * @param options carries optional attributes values - * @return a new instance of UnbatchGrad + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public UnbatchGrad unbatchGrad(Operand originalInput, - Operand batchIndex, Operand grad, Operand id, - UnbatchGrad.Options... options) { - return UnbatchGrad.create(scope, originalInput, batchIndex, grad, id, options); + public Constant val(boolean[] data) { + return Constant.vectorOf(scope, data); } /** - * Finds unique elements along an axis of a tensor. - *

          - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` that is the same size as - * the number of the elements in `x` along the `axis` dimension. It - * contains the index in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - *

          - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - *

          - * For example: - *

          {@code
          -   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          -   *  y, idx = unique(x)
          -   *  y ==> [1, 2, 4, 7, 8]
          -   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 0`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx = unique(x, axis=0)
          -   *  y ==> [[1, 0, 0],
          -   *         [2, 0, 0]]
          -   *  idx ==> [0, 0, 1]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 1`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx = unique(x, axis=1)
          -   *  y ==> [[1, 0],
          -   *         [1, 0],
          -   *         [2, 0]]
          -   *  idx ==> [0, 1, 1]
          -   *  }
          + * Creates a rank-4 constant of {@code byte} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant + */ + public Constant val(byte[][][][] data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a constant of {@code int} elements that is a copy of a given n-dimensional array. * - * @param data type for {@code y()} output - * @param data type for {@code idx()} output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to - * find the unique elements. - * @return a new instance of Unique + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code int} elements. + * @return an integer constant */ - public Unique unique(Operand x, - Operand axis) { - return Unique.create(scope, x, axis); + public Constant val(IntNdArray data) { + return Constant.tensorOf(scope, data); } /** - * Finds unique elements along an axis of a tensor. - *

          - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` that is the same size as - * the number of the elements in `x` along the `axis` dimension. It - * contains the index in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - *

          - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - *

          - * For example: - *

          {@code
          -   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          -   *  y, idx = unique(x)
          -   *  y ==> [1, 2, 4, 7, 8]
          -   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 0`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx = unique(x, axis=0)
          -   *  y ==> [[1, 0, 0],
          -   *         [2, 0, 0]]
          -   *  idx ==> [0, 0, 1]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 1`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx = unique(x, axis=1)
          -   *  y ==> [[1, 0],
          -   *         [1, 0],
          -   *         [2, 0]]
          -   *  idx ==> [0, 1, 1]
          -   *  }
          + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. * - * @param data type for {@code y()} output - * @param data type for {@code idx()} output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to - * find the unique elements. - * @param outIdx - * @return a new instance of Unique + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code String} elements. + * @return a byte constant */ - public Unique unique(Operand x, - Operand axis, DataType outIdx) { - return Unique.create(scope, x, axis, outIdx); + public Constant val(NdArray data) { + return Constant.tensorOf(scope, data); } /** - * Finds unique elements along an axis of a tensor. - *

          - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` and a tensor `count` - * that are the same size as the number of the elements in `x` along the - * `axis` dimension. The `idx` contains the index in the unique output `y` - * and the `count` contains the count in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - *

          - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - *

          - * For example: - *

          {@code
          -   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          -   *  y, idx, count = unique_with_counts(x)
          -   *  y ==> [1, 2, 4, 7, 8]
          -   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          -   *  count ==> [2, 1, 3, 1, 2]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 0`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx, count = unique_with_counts(x, axis=0)
          -   *  y ==> [[1, 0, 0],
          -   *         [2, 0, 0]]
          -   *  idx ==> [0, 0, 1]
          -   *  count ==> [2, 1]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 1`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx, count = unique_with_counts(x, axis=1)
          -   *  y ==> [[1, 0],
          -   *         [1, 0],
          -   *         [2, 0]]
          -   *  idx ==> [0, 1, 1]
          -   *  count ==> [1, 2]
          -   *  }
          + * Creates a rank-6 constant of {@code long} elements. * - * @param data type for {@code y()} output - * @param data type for {@code idx()} output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to - * find the unique elements. - * @return a new instance of UniqueWithCounts + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant */ - public UniqueWithCounts uniqueWithCounts( - Operand x, Operand axis) { - return UniqueWithCounts.create(scope, x, axis); + public Constant val(long[][][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Finds unique elements along an axis of a tensor. - *

          - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` and a tensor `count` - * that are the same size as the number of the elements in `x` along the - * `axis` dimension. The `idx` contains the index in the unique output `y` - * and the `count` contains the count in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - *

          - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - *

          - * For example: - *

          {@code
          -   *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
          -   *  y, idx, count = unique_with_counts(x)
          -   *  y ==> [1, 2, 4, 7, 8]
          -   *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
          -   *  count ==> [2, 1, 3, 1, 2]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 0`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx, count = unique_with_counts(x, axis=0)
          -   *  y ==> [[1, 0, 0],
          -   *         [2, 0, 0]]
          -   *  idx ==> [0, 0, 1]
          -   *  count ==> [2, 1]
          -   *  }
          - * For an `2-D` tensor `x` with `axis = 1`: - *
          {@code
          -   *  # tensor 'x' is [[1, 0, 0],
          -   *  #                [1, 0, 0],
          -   *  #                [2, 0, 0]]
          -   *  y, idx, count = unique_with_counts(x, axis=1)
          -   *  y ==> [[1, 0],
          -   *         [1, 0],
          -   *         [2, 0]]
          -   *  idx ==> [0, 1, 1]
          -   *  count ==> [1, 2]
          -   *  }
          + * Creates a rank-1 constant of {@code long} elements representing the size of each dimensions of + * the given shape. * - * @param data type for {@code y()} output - * @param data type for {@code idx()} output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to - * find the unique elements. - * @param outIdx - * @return a new instance of UniqueWithCounts + * @param scope is a scope used to add the underlying operation. + * @param shape a shape + * @return a long constant */ - public UniqueWithCounts uniqueWithCounts( - Operand x, Operand axis, DataType outIdx) { - return UniqueWithCounts.create(scope, x, axis, outIdx); + public Constant val(Shape shape) { + return Constant.create(scope, shape); } /** - * Converts an array of flat indices into a tuple of coordinate arrays. - *

          + * Create a constant from a Tensor. * - * Example: - *

          {@code
          -   *  y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])
          -   *  # 'dims' represent a hypothetical (3, 3) tensor of indices:
          -   *  # [[0, 1, *2*],
          -   *  #  [3, 4, *5*],
          -   *  #  [6, *7*, 8]]
          -   *  # For each entry from 'indices', this operation returns
          -   *  # its coordinates (marked with '*'), such as
          -   *  # 2 ==> (0, 2)
          -   *  # 5 ==> (1, 2)
          -   *  # 7 ==> (2, 1)
          -   *  y ==> [[0, 1, 2], [2, 2, 1]]
          -   *  }
          + * @param scope is a scope used to add the underlying operation. + * @param tensor a Tensor holding the constant value + * @return a constant of the same data type as `tensor` + */ + public Constant val(Tensor tensor) { + return Constant.create(scope, tensor); + } + + /** + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the given encoding. * - * @compatibility(numpy) Equivalent to np.unravel_index - * @end_compatibility - * @param data type for {@code output()} output - * @param indices An 0-D or 1-D `int` Tensor whose elements are indices into the - * flattened version of an array of dimensions dims. - * @param dims An 1-D `int` Tensor. The shape of the array to use for unraveling - * indices. - * @return a new instance of UnravelIndex + * @param scope is a scope used to add the underlying operation. + * @param charset charset used to encode/decode string bytes. + * @param data an n-dimensional array of {@code String} elements. + * @return a byte constant */ - public UnravelIndex unravelIndex(Operand indices, Operand dims) { - return UnravelIndex.create(scope, indices, dims); + public Constant val(Charset charset, NdArray data) { + return Constant.tensorOf(scope, charset, data); } /** - * Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. - *

          - * Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. - * For example, given a tensor of shape `(A, B, C, D)`; - *

          - * If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` - * and each tensor in `output` will have shape `(B, C, D)`. (Note that the - * dimension unpacked along is gone, unlike `split`). - *

          - * If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` - * and each tensor in `output` will have shape `(A, C, D)`. - * Etc. - *

          - * This is the opposite of `pack`. + * Creates a constant of {@code String} elements, using the given charset. * - * @param data type for {@code output()} output - * @param value 1-D or higher, with `axis` dimension size equal to `num`. - * @param num - * @param options carries optional attributes values - * @return a new instance of Unstack + * @param scope is a scope used to add the underlying operation. + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. + * @return the {@code String} constant */ - public Unstack unstack(Operand value, Long num, - Unstack.Options... options) { - return Unstack.create(scope, value, num, options); + public Constant val(Charset charset, String[] data) { + return Constant.vectorOf(scope, charset, data); } /** - * Op is similar to a lightweight Dequeue. - *

          - * The basic functionality is similar to dequeue with many fewer - * capabilities and options. This Op is optimized for performance. + * Creates a {@code String} constant using a specified encoding. * - * @param dtypes - * @param options carries optional attributes values - * @return a new instance of Unstage + * @param scope is a scope used to add the underlying operation. + * @param charset The encoding from String to bytes. + * @param data The string to put into the new constant. + * @return a string constant */ - public Unstage unstage(List> dtypes, Unstage.Options... options) { - return Unstage.create(scope, dtypes, options); + public Constant val(Charset charset, String data) { + return Constant.scalarOf(scope, charset, data); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperation.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperation.java index ec3aefe8dce..c98a29fa322 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerOperation.java @@ -99,7 +99,7 @@ public Shape shape(int outputIndex) { for (int i = 0; i < shape.length; ++i) { shape[i] = dim(outputNativeHandle, i); } - return Shape.make(shape); + return Shape.of(shape); } @Override diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java index 563fb6f9eed..872f195fe1a 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/EagerSession.java @@ -339,6 +339,16 @@ public void close() { doClose(); } + // Cleanup default session context for unit tests + static void closeDefaultForTest() { + synchronized (EagerSession.class) { + if (defaultSession != null) { + 
defaultSession.doClose(); + defaultSession = null; + } + } + } + @Override public OperationBuilder opBuilder(String type, String name) { if (resourceCleanupStrategy == ResourceCleanupStrategy.ON_SAFE_POINTS) { diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperation.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperation.java index 0a891ebe7a9..655244c172a 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/GraphOperation.java @@ -141,7 +141,7 @@ Shape shape(int outputIdx) { Graph.Reference r = graph.ref(); try { long[] shape = shape(r.nativeHandle(), getUnsafeNativeHandle(), outputIdx); - return shape == null ? Shape.unknown() : Shape.make(shape); + return shape == null ? Shape.unknown() : Shape.of(shape); } finally { r.close(); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Operand.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Operand.java index 39151afb870..8b75cf57a39 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Operand.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Operand.java @@ -23,17 +23,19 @@ *

          Example usage: * *

          {@code
          + * Ops tf = Ops.create();
          + *
            * // The "decodeJpeg" operation can be used as an operand to the "cast" operation
          - * Operand decodeJpeg = ops.image.decodeJpeg(...);
          - * ops.dtypes.cast(decodeJpeg, TFloat32.DTYPE);
          + * Operand decodeJpeg = tf.image.decodeJpeg(...);
          + * tf.dtypes.cast(decodeJpeg, TFloat32.DTYPE);
            *
            * // The output "y" of the "unique" operation can be used as an operand to the "cast" operation
          - * Output y = ops.unique(...).y();
          - * ops.dtypes.cast(y, TFloat32.DTYPE);
          + * Output y = tf.unique(...).y();
          + * tf.dtypes.cast(y, TFloat32.DTYPE);
            *
            * // The "split" operation can be used as operand list to the "concat" operation
          - * Iterable> split = ops.split(...);
          - * ops.concat(split, ops.constant(0));
          + * Iterable> split = tf.split(...);
          + * tf.concat(split, tf.val(0));
            * }
          */ public interface Operand { @@ -49,10 +51,25 @@ public interface Operand { Output asOutput(); /** - * Returns the data of the tensor. + * Returns this operand as a tensor. + * + * Only works when running in an eager execution + *

          This helper method is equivalent to {@code asOutput().tensor()} + * + * @return the tensor + * @throws IllegalStateException if this is an operand of a graph + */ + default Tensor asTensor() { + return asOutput().tensor(); + } + + /** + * Returns the data of this operand. * - * This only works when running in an eager execution + * Only works when running in an eager execution + *

          This helper method is equivalent to {@code asTensor().data()} * + * @return the tensor data * @throws IllegalStateException if this is an operand of a graph */ default T data() { diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java index 57026923d5d..585442ea559 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Tensor.java @@ -64,8 +64,8 @@ import org.tensorflow.internal.c_api.TF_Tensor; import org.tensorflow.tools.Shape; import org.tensorflow.types.TBool; -import org.tensorflow.types.TFloat64; import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -158,7 +158,7 @@ public static Tensor create(Object obj, DataType dtype) } long[] dimSizes = new long[numDimensions(obj, dtype)]; fillShape(obj, 0, dimSizes); - Tensor t = new Tensor(dtype, Shape.make(dimSizes)); + Tensor t = new Tensor(dtype, Shape.of(dimSizes)); TF_Tensor nativeHandle; if (t.dtype != TString.DTYPE) { long byteSize = elemByteSize(t.dtype) * t.shape.size(); @@ -290,25 +290,25 @@ public static Tensor create(DataType dtype, long[] shape return t; } - public static Tensor allocate(DataType dtype, Shape shape) { - return allocate(dtype, shape, shape.size() * dtype.byteSize()); + public static Tensor of(DataType dtype, Shape shape) { + return of(dtype, shape, shape.size() * dtype.byteSize()); } - public static Tensor allocate(DataType dtype, Shape shape, long size) { + public static Tensor of(DataType dtype, Shape shape, long size) { Tensor t = new Tensor<>(dtype, shape); TF_Tensor nativeHandle = allocate(t.dtype.nativeCode(), shape.asArray(), size); t.nativeRef = new NativeReference(nativeHandle); return t; } - public static Tensor 
allocate(DataType dtype, Shape shape, + public static Tensor of(DataType dtype, Shape shape, Consumer dataInitializer) { - return allocate(dtype, shape, shape.size() * dtype.byteSize(), dataInitializer); + return of(dtype, shape, shape.size() * dtype.byteSize(), dataInitializer); } - public static Tensor allocate(DataType dtype, Shape shape, long size, + public static Tensor of(DataType dtype, Shape shape, long size, Consumer dataInitializer) { - Tensor tensor = allocate(dtype, shape, size); + Tensor tensor = of(dtype, shape, size); try { dataInitializer.accept(tensor.data()); return tensor; @@ -350,7 +350,7 @@ private static Tensor allocateForBuffer(DataType dataTyp // DT_STRING tensor encoded in a ByteBuffer. nbytes = nBuffered; } - Tensor t = new Tensor<>(dataType, Shape.make(dimSizes)); + Tensor t = new Tensor<>(dataType, Shape.of(dimSizes)); TF_Tensor nativeHandle = allocate(t.dtype.nativeCode(), dimSizes, nbytes); t.nativeRef = new NativeReference(nativeHandle); return t; @@ -582,7 +582,7 @@ public String toString() { *

          Takes ownership of the handle. */ static Tensor fromHandle(TF_Tensor handle) { - Tensor t = new Tensor<>(DataTypes.fromNativeCode(dtype(handle)), Shape.make(shape(handle))); + Tensor t = new Tensor<>(DataTypes.fromNativeCode(dtype(handle)), Shape.of(shape(handle))); t.nativeRef = new NativeReference(handle); return t; } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java index 4ce963f7864..45f641dc3ec 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Constant.java @@ -15,8 +15,6 @@ package org.tensorflow.op.core; -import static java.nio.charset.StandardCharsets.UTF_8; - import java.nio.ByteBuffer; import java.nio.DoubleBuffer; import java.nio.FloatBuffer; @@ -28,19 +26,44 @@ import org.tensorflow.Operation; import org.tensorflow.Output; import org.tensorflow.Tensor; +import org.tensorflow.op.Ops; import org.tensorflow.op.PrimitiveOp; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.Operator; +import org.tensorflow.tools.Shape; +import org.tensorflow.tools.ndarray.BooleanNdArray; +import org.tensorflow.tools.ndarray.ByteNdArray; +import org.tensorflow.tools.ndarray.DoubleNdArray; +import org.tensorflow.tools.ndarray.FloatNdArray; +import org.tensorflow.tools.ndarray.IntNdArray; +import org.tensorflow.tools.ndarray.LongNdArray; +import org.tensorflow.tools.ndarray.NdArray; +import org.tensorflow.tools.ndarray.NdArrays; +import org.tensorflow.tools.ndarray.StdArrays; import org.tensorflow.types.TBool; -import org.tensorflow.types.TFloat64; import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; +import 
org.tensorflow.types.TUint8; import org.tensorflow.types.family.TType; -/** An operator producing a constant value. */ +/** + * An operator producing a constant value. + * + *

          All endpoints of this operator are named `val`, except those accepting vararg + * elements in parameter, which are named `array`. For example: + * + *

          {@code
          + * Ops tf = Ops.create();
          + * tf.val(1.0f);  // mapped to Constant.scalarOf(scope, float);
          + * tf.val(new float[] {1.0f, 2.0f});  // mapped to Constant.vectorOf(scope, float[])
          + * tf.val(new float[][] { {1.0f, 2.0f}, {3.0f, 4.0f} });  //mapped to Constant.tensorOf(scope, float[][])
          + * tf.array(1.0f, 2.0f, 3.0f);  // mapped to Constant.arrayOf(scope, float...)
          + * }
          + */ @Operator public final class Constant extends PrimitiveOp implements Operand { @@ -51,8 +74,8 @@ public final class Constant extends PrimitiveOp implements Oper * @param data The value to put into the new constant. * @return an integer constant */ - @Endpoint - public static Constant create(Scope scope, int data) { + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, int data) { try (Tensor value = TInt32.scalarOf(data)) { return create(scope, value); } @@ -64,24 +87,43 @@ public static Constant create(Scope scope, int data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return an integer constant */ - @Endpoint - public static Constant create(Scope scope, int[] data) { + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, int[] data) { try (Tensor value = TInt32.vectorOf(data)) { return create(scope, value); } } + /** + * Creates a constant of {@code int} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a float constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, int... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); + } + /** * Creates a rank-2 constant of {@code int} elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
+ * @return an integer constant */ - @Endpoint - public static Constant create(Scope scope, int[][] data) { - return create(scope, data, TInt32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, int[][] data) { + try (Tensor value = TInt32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -90,10 +132,13 @@ public static Constant create(Scope scope, int[][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return an integer constant */ - @Endpoint - public static Constant create(Scope scope, int[][][] data) { - return create(scope, data, TInt32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, int[][][] data) { + try (Tensor value = TInt32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -102,10 +147,13 @@ public static Constant create(Scope scope, int[][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return an integer constant */ - @Endpoint - public static Constant create(Scope scope, int[][][][] data) { - return create(scope, data, TInt32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, int[][][][] data) { + try (Tensor value = TInt32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -114,10 +162,13 @@ public static Constant create(Scope scope, int[][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of the * new constant will match those of the array. + * @return an integer constant */ - @Endpoint - public static Constant create(Scope scope, int[][][][][] data) { - return create(scope, data, TInt32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, int[][][][][] data) { + try (Tensor value = TInt32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -126,10 +177,27 @@ public static Constant create(Scope scope, int[][][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return an integer constant */ - @Endpoint - public static Constant create(Scope scope, int[][][][][][] data) { - return create(scope, data, TInt32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, int[][][][][][] data) { + try (Tensor value = TInt32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code int} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code int} elements. + * @return an integer constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, IntNdArray data) { + try (Tensor value = TInt32.tensorOf(data)) { + return create(scope, value); + } } /** @@ -145,8 +213,10 @@ public static Constant create(Scope scope, int[][][][][][] data) { * @param data a buffer containing the tensor data. 
* @return an integer constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TInt32>)} instead */ @Endpoint + @Deprecated public static Constant create(Scope scope, long[] shape, IntBuffer data) { try (Tensor value = Tensor.create(shape, data)) { return create(scope, value); @@ -157,12 +227,14 @@ public static Constant create(Scope scope, long[] shape, IntBuffer data) * Creates a constant containing a single {@code float} element. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. + * @param data The value to put into the new constant. * @return a float constant */ - @Endpoint - public static Constant create(Scope scope, float data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, float data) { + try (Tensor value = TFloat32.scalarOf(data)) { + return create(scope, value); + } } /** @@ -171,10 +243,28 @@ public static Constant create(Scope scope, float data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a float constant */ - @Endpoint - public static Constant create(Scope scope, float[] data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, float[] data) { + try (Tensor value = TFloat32.vectorOf(data)) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code float} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a float constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, float... 
data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); } /** @@ -183,10 +273,13 @@ public static Constant create(Scope scope, float[] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a float constant */ - @Endpoint - public static Constant create(Scope scope, float[][] data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, float[][] data) { + try (Tensor value = TFloat32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -195,10 +288,13 @@ public static Constant create(Scope scope, float[][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a float constant */ - @Endpoint - public static Constant create(Scope scope, float[][][] data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, float[][][] data) { + try (Tensor value = TFloat32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -207,10 +303,13 @@ public static Constant create(Scope scope, float[][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
+ * @return a float constant */ - @Endpoint - public static Constant create(Scope scope, float[][][][] data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, float[][][][] data) { + try (Tensor value = TFloat32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -219,10 +318,13 @@ public static Constant create(Scope scope, float[][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a float constant */ - @Endpoint - public static Constant create(Scope scope, float[][][][][] data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, float[][][][][] data) { + try (Tensor value = TFloat32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -231,10 +333,27 @@ public static Constant create(Scope scope, float[][][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a float constant */ - @Endpoint - public static Constant create(Scope scope, float[][][][][][] data) { - return create(scope, data, TFloat32.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, float[][][][][][] data) { + try (Tensor value = TFloat32.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code float} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. 
+ * @param data an n-dimensional array of {@code float} elements. + * @return a float constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, FloatNdArray data) { + try (Tensor value = TFloat32.tensorOf(data)) { + return create(scope, value); + } } /** @@ -250,8 +369,10 @@ public static Constant create(Scope scope, float[][][][][][] data) { * @param data a buffer containing the tensor data. * @return a float constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TFloat32>)} instead */ @Endpoint + @Deprecated public static Constant create(Scope scope, long[] shape, FloatBuffer data) { try (Tensor value = Tensor.create(shape, data)) { return create(scope, value); @@ -265,9 +386,11 @@ public static Constant create(Scope scope, long[] shape, FloatBuffer d * @param data The value to put into the new constant. * @return a double constant */ - @Endpoint - public static Constant create(Scope scope, double data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, double data) { + try (Tensor value = TFloat64.scalarOf(data)) { + return create(scope, value); + } } /** @@ -276,10 +399,28 @@ public static Constant create(Scope scope, double data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a double constant */ - @Endpoint - public static Constant create(Scope scope, double[] data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, double[] data) { + try (Tensor value = TFloat64.vectorOf(data)) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code double} elements. 
+ * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a double constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, double... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); } /** @@ -288,10 +429,13 @@ public static Constant create(Scope scope, double[] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a double constant */ - @Endpoint - public static Constant create(Scope scope, double[][] data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, double[][] data) { + try (Tensor value = TFloat64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -300,10 +444,13 @@ public static Constant create(Scope scope, double[][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a double constant */ - @Endpoint - public static Constant create(Scope scope, double[][][] data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, double[][][] data) { + try (Tensor value = TFloat64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -312,10 +459,13 @@ public static Constant create(Scope scope, double[][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of the * new constant will match those of the array. + * @return a double constant */ - @Endpoint - public static Constant create(Scope scope, double[][][][] data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, double[][][][] data) { + try (Tensor value = TFloat64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -324,10 +474,13 @@ public static Constant create(Scope scope, double[][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a double constant */ - @Endpoint - public static Constant create(Scope scope, double[][][][][] data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, double[][][][][] data) { + try (Tensor value = TFloat64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -336,10 +489,27 @@ public static Constant create(Scope scope, double[][][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a double constant */ - @Endpoint - public static Constant create(Scope scope, double[][][][][][] data) { - return create(scope, data, TFloat64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, double[][][][][][] data) { + try (Tensor value = TFloat64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code double} elements that is a copy of a given n-dimensional array. 
+ * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code double} elements. + * @return a double constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, DoubleNdArray data) { + try (Tensor value = TFloat64.tensorOf(data)) { + return create(scope, value); + } } /** @@ -355,8 +525,10 @@ public static Constant create(Scope scope, double[][][][][][] data) { * @param data a buffer containing the tensor data. * @return a double constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TFloat64>)} instead */ @Endpoint + @Deprecated public static Constant create(Scope scope, long[] shape, DoubleBuffer data) { try (Tensor value = Tensor.create(shape, data)) { return create(scope, value); @@ -370,9 +542,11 @@ public static Constant create(Scope scope, long[] shape, DoubleBuffer * @param data The value to put into the new constant. * @return a long constant */ - @Endpoint - public static Constant create(Scope scope, long data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, long data) { + try (Tensor value = TInt64.scalarOf(data)) { + return create(scope, value); + } } /** @@ -381,10 +555,13 @@ public static Constant create(Scope scope, long data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
+ * @return a long constant */ - @Endpoint - public static Constant create(Scope scope, long[] data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, long[] data) { + try (Tensor value = TInt64.vectorOf(data)) { + return create(scope, value); + } } /** @@ -393,10 +570,28 @@ public static Constant create(Scope scope, long[] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a long constant */ - @Endpoint - public static Constant create(Scope scope, long[][] data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, long[][] data) { + try (Tensor value = TInt64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code long} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a long constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, long... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); } /** @@ -405,10 +600,13 @@ public static Constant create(Scope scope, long[][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
+ * @return a long constant */ - @Endpoint - public static Constant create(Scope scope, long[][][] data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, long[][][] data) { + try (Tensor value = TInt64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -417,10 +615,13 @@ public static Constant create(Scope scope, long[][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a long constant */ - @Endpoint - public static Constant create(Scope scope, long[][][][] data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, long[][][][] data) { + try (Tensor value = TInt64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -429,10 +630,13 @@ public static Constant create(Scope scope, long[][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a long constant */ - @Endpoint - public static Constant create(Scope scope, long[][][][][] data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, long[][][][][] data) { + try (Tensor value = TInt64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -441,10 +645,27 @@ public static Constant create(Scope scope, long[][][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of the * new constant will match those of the array. + * @return a long constant */ - @Endpoint - public static Constant create(Scope scope, long[][][][][][] data) { - return create(scope, data, TInt64.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, long[][][][][][] data) { + try (Tensor value = TInt64.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code long} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code long} elements. + * @return a long constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, LongNdArray data) { + try (Tensor value = TInt64.tensorOf(data)) { + return create(scope, value); + } } /** @@ -460,8 +681,10 @@ public static Constant create(Scope scope, long[][][][][][] data) { * @param data a buffer containing the tensor data. * @return a long constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @deprecated use {@link Ops#val(Tensor) Ops.constant(Tensor<TInt64>)} instead */ @Endpoint + @Deprecated public static Constant create(Scope scope, long[] shape, LongBuffer data) { try (Tensor value = Tensor.create(shape, data)) { return create(scope, value); @@ -475,9 +698,11 @@ public static Constant create(Scope scope, long[] shape, LongBuffer data * @param data The value to put into the new constant. 
* @return a boolean constant */ - @Endpoint - public static Constant create(Scope scope, boolean data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, boolean data) { + try (Tensor value = TBool.scalarOf(data)) { + return create(scope, value); + } } /** @@ -486,10 +711,28 @@ public static Constant create(Scope scope, boolean data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a boolean constant */ - @Endpoint - public static Constant create(Scope scope, boolean[] data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, boolean[] data) { + try (Tensor value = TBool.vectorOf(data)) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code boolean} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a boolean constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, boolean... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); } /** @@ -498,10 +741,13 @@ public static Constant create(Scope scope, boolean[] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
+ * @return a boolean constant */ - @Endpoint - public static Constant create(Scope scope, boolean[][] data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, boolean[][] data) { + try (Tensor value = TBool.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -510,10 +756,13 @@ public static Constant create(Scope scope, boolean[][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a boolean constant */ - @Endpoint - public static Constant create(Scope scope, boolean[][][] data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, boolean[][][] data) { + try (Tensor value = TBool.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -522,10 +771,13 @@ public static Constant create(Scope scope, boolean[][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a boolean constant */ - @Endpoint - public static Constant create(Scope scope, boolean[][][][] data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, boolean[][][][] data) { + try (Tensor value = TBool.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -534,10 +786,13 @@ public static Constant create(Scope scope, boolean[][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of the * new constant will match those of the array. + * @return a boolean constant */ - @Endpoint - public static Constant create(Scope scope, boolean[][][][][] data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, boolean[][][][][] data) { + try (Tensor value = TBool.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** @@ -546,109 +801,160 @@ public static Constant create(Scope scope, boolean[][][][][] data) { * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. + * @return a boolean constant */ - @Endpoint - public static Constant create(Scope scope, boolean[][][][][][] data) { - return create(scope, data, TBool.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, boolean[][][][][][] data) { + try (Tensor value = TBool.tensorOf(StdArrays.shapeOf(data), t -> StdArrays.copyTo(t, data))) { + return create(scope, value); + } } /** - * Creates a {@code String} constant using the default, UTF-8 encoding. + * Creates a constant of {@code boolean} elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data The string to put into the new constant. - * @return a string constant + * @param data an n-dimensional array of {@code boolean} elements. + * @return a boolean constant */ - @Endpoint - public static Constant create(Scope scope, String data) { - return create(scope, data, UTF_8); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, BooleanNdArray data) { + try (Tensor value = TBool.tensorOf(data)) { + return create(scope, value); + } } /** - * Creates a {@code String} constant using a specified encoding. 
+ * Creates a constant containing a single {@code byte} element. * * @param scope is a scope used to add the underlying operation. - * @param charset The encoding from String to bytes. - * @param data The string to put into the new constant. - * @return a string constant + * @param data The value to put into the new constant. + * @return a byte constant */ - @Endpoint - public static Constant create(Scope scope, String data, Charset charset) { - try (Tensor value = Tensor.create(data.getBytes(charset), TString.DTYPE)) { + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, byte data) { + try (Tensor value = TUint8.scalarOf(data)) { return create(scope, value); } } /** - * Creates a constant containing a single {@code String} element, represented as an array of {@code byte}s. + * Creates a rank-1 constant of {@code byte} elements. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - @Endpoint - public static Constant create(Scope scope, byte[] data) { - return create(scope, data, TString.DTYPE); + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, byte[] data) { + try (Tensor value = TUint8.vectorOf(data)) { + return create(scope, value); + } } /** - * Creates a rank-1 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Creates a constant of {@code byte} elements. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param data An array containing the values to put into the new constant. 
+ * @return a byte constant */ - @Endpoint - public static Constant create(Scope scope, byte[][] data) { - return create(scope, data, TString.DTYPE); + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, byte... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); } /** - * Creates a rank-2 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Creates a rank-2 constant of {@code byte} elements. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - @Endpoint - public static Constant create(Scope scope, byte[][][] data) { - return create(scope, data, TString.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, byte[][] data) { + try (Tensor value = TUint8.tensorOf(StdArrays.shapeOf(data), d -> StdArrays.copyTo(d, data))) { + return create(scope, value); + } } /** - * Creates a rank-3 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Creates a rank-3 constant of {@code byte} elements. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. 
+ * @return a byte constant */ - @Endpoint - public static Constant create(Scope scope, byte[][][][] data) { - return create(scope, data, TString.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, byte[][][] data) { + try (Tensor value = TUint8.tensorOf(StdArrays.shapeOf(data), d -> StdArrays.copyTo(d, data))) { + return create(scope, value); + } } /** - * Creates a rank-4 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Creates a rank-4 constant of {@code byte} elements. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - @Endpoint - public static Constant create(Scope scope, byte[][][][][] data) { - return create(scope, data, TString.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, byte[][][][] data) { + try (Tensor value = TUint8.tensorOf(StdArrays.shapeOf(data), d -> StdArrays.copyTo(d, data))) { + return create(scope, value); + } } /** - * Creates a rank-5 constant of {@code String} elements, each represented as an array of {@code byte}s. + * Creates a rank-5 constant of {@code byte} elements. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. 
+ * @return a byte constant */ - @Endpoint - public static Constant create(Scope scope, byte[][][][][][] data) { - return create(scope, data, TString.DTYPE); + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, byte[][][][][] data) { + try (Tensor value = TUint8.tensorOf(StdArrays.shapeOf(data), d -> StdArrays.copyTo(d, data))) { + return create(scope, value); + } + } + + /** + * Creates a rank-6 constant of {@code byte} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, byte[][][][][][] data) { + try (Tensor value = TUint8.tensorOf(StdArrays.shapeOf(data), d -> StdArrays.copyTo(d, data))) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code byte} elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code byte} elements. + * @return a byte constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, ByteNdArray data) { + try (Tensor value = TUint8.tensorOf(data)) { + return create(scope, value); + } } /** @@ -666,14 +972,213 @@ public static Constant create(Scope scope, byte[][][][][][] data) { * @return a constant of type `type` * @throws IllegalArgumentException If the tensor datatype or shape is not compatible with the * buffer + * @deprecated use {@link Ops#val(Tensor)} instead */ @Endpoint + @Deprecated public static Constant create(Scope scope, DataType type, long[] shape, ByteBuffer data) { try (Tensor value = Tensor.create(type, shape, data)) { return create(scope, value); } } + /** + * Creates a {@code String} constant using the default, UTF-8 encoding. 
+ * + * @param scope is a scope used to add the underlying operation. + * @param data The string to put into the new constant. + * @return a string constant + */ + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, String data) { + try (Tensor value = TString.scalarOf(data)) { + return create(scope, value); + } + } + + /** + * Creates a {@code String} constant using a specified encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param charset The encoding from String to bytes. + * @param data The string to put into the new constant. + * @return a string constant + */ + @Endpoint(name = "val") + public static Constant scalarOf(Scope scope, Charset charset, String data) { + try (Tensor value = TString.tensorOf(charset, NdArrays.scalarOfObject(data))) { + return create(scope, value); + } + } + + /** + * Creates a rank-1 constant of {@code String} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a {@link TString} constant matrix + */ + public static Constant vectorOf(Scope scope, String[] data) { + NdArray src = NdArrays.vectorOfObjects(data); + try (Tensor value = TString.tensorOf(src)) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code String} elements, using the given charset. + * + * @param scope is a scope used to add the underlying operation. + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. 
+ * @return the {@code String} constant + */ + @Endpoint(name = "val") + public static Constant vectorOf(Scope scope, Charset charset, String[] data) { + try (Tensor value = TString.tensorOf(charset, NdArrays.vectorOfObjects(data))) { + return Constant.create(scope, value); + } + } + + /** + * Creates a constant of {@code String} elements, using the default UTF-8 charset. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return the {@code String} constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, String... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, data); + } + + /** + * Creates a constant of {@code String} elements, using the given charset. + * + * @param scope is a scope used to add the underlying operation. + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. + * @return the {@code String} constant + */ + @Endpoint(name = "array") + public static Constant arrayOf(Scope scope, Charset charset, String... data) { + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + return vectorOf(scope, charset, data); + } + + /** + * Creates a rank-2 constant of {@code String} elements, using default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. 
+ * @return a {@link TString} constant matrix + */ + public static Constant tensorOf(Scope scope, String[][] data) { + NdArray src = NdArrays.ofObjects(String.class, StdArrays.shapeOf(data)); + StdArrays.copyTo(src, data); + try (Tensor value = TString.tensorOf(src)) { + return create(scope, value); + } + } + + /** + * Creates a rank-3 constant of {@code String} elements, using default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a {@link TString} constant matrix + */ + public static Constant tensorOf(Scope scope, String[][][] data) { + NdArray src = NdArrays.ofObjects(String.class, StdArrays.shapeOf(data)); + StdArrays.copyTo(src, data); + try (Tensor value = TString.tensorOf(src)) { + return create(scope, value); + } + } + + /** + * Creates a rank-4 constant of {@code String} elements, using default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a {@link TString} constant matrix + */ + public static Constant tensorOf(Scope scope, String[][][][] data) { + NdArray src = NdArrays.ofObjects(String.class, StdArrays.shapeOf(data)); + StdArrays.copyTo(src, data); + try (Tensor value = TString.tensorOf(src)) { + return create(scope, value); + } + } + + /** + * Creates a rank-5 constant of {@code String} elements, using default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. 
+ * @return a {@link TString} constant + */ + public static Constant tensorOf(Scope scope, String[][][][][] data) { + NdArray src = NdArrays.ofObjects(String.class, StdArrays.shapeOf(data)); + StdArrays.copyTo(src, data); + try (Tensor value = TString.tensorOf(src)) { + return create(scope, value); + } + } + + /** + * Creates a rank-6 constant of {@code String} elements, using default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a {@link TString} constant + */ + public static Constant tensorOf(Scope scope, String[][][][][][] data) { + NdArray src = NdArrays.ofObjects(String.class, StdArrays.shapeOf(data)); + StdArrays.copyTo(src, data); + try (Tensor value = TString.tensorOf(src)) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code String} elements. + * @return a string constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, NdArray data) { + try (Tensor value = TString.tensorOf(data)) { + return create(scope, value); + } + } + + /** + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the given encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param charset charset used to encode/decode string bytes. + * @param data an n-dimensional array of {@code String} elements. + * @return a string constant + */ + @Endpoint(name = "val") + public static Constant tensorOf(Scope scope, Charset charset, NdArray data) { + try (Tensor value = TString.tensorOf(charset, data)) { + return create(scope, value); + } + } + + /** + * Create a constant from a Java object. 
* @@ -689,14 +1194,29 @@ public static Constant create(Scope scope, DataType type * @param object a Java object representing the constant. * @return a constant of type `type` * @see org.tensorflow.Tensor#create(Object) Tensor.create + * @deprecated use {@link Ops#val(Tensor)} instead */ @Endpoint + @Deprecated public static Constant create(Scope scope, Object object, DataType type) { try (Tensor value = Tensor.create(object, type)) { return create(scope, value); } } + /** + * Creates a rank-1 constant of {@code long} elements representing the size of each dimensions of + * the given shape. + * + * @param scope is a scope used to add the underlying operation. + * @param shape a shape + * @return a long constant + */ + @Endpoint(name = "val") + public static Constant create(Scope scope, Shape shape) { + return vectorOf(scope, shape.asArray()); + } + /** * Create a constant from a Tensor. * @@ -704,7 +1224,7 @@ public static Constant create(Scope scope, Object object, D * @param tensor a Tensor holding the constant value * @return a constant of the same data type as `tensor` */ - @Endpoint + @Endpoint(name = "val") public static Constant create(Scope scope, Tensor tensor) { return new Constant<>( scope diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Gradients.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Gradients.java index b8bbc92a3d1..bcc2032da64 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Gradients.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Gradients.java @@ -44,15 +44,14 @@ *

          * Example of usage: *

          {@code
          - * Gradients gradients = Gradients.create(scope, Arrays.asList(loss), Arrays.asList(w, b));
          - * 
          - * Constant alpha = ops.constant(1.0f, Float.class);
          - * ApplyGradientDescent.create(scope, w, alpha, gradients.dy(0));
          - * ApplyGradientDescent.create(scope, b, alpha, gradients.dy(1));
          + * Gradients gradients = tf.gradients(loss, Arrays.asList(w, b));
          + * Constant alpha = tf.val(1.0f);
          + * tf.train.applyGradientDescent(w, alpha, gradients.dy(0));
          + * tf.train.applyGradientDescent(b, alpha, gradients.dy(1));
            * }
          */ @Operator -public class Gradients implements Op, Iterable> { +public final class Gradients implements Op, Iterable> { /** * Optional attributes for {@link Gradients} diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Zeros.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Zeros.java index 88951780cb5..612af709e4a 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Zeros.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/core/Zeros.java @@ -19,6 +19,7 @@ import org.tensorflow.Operand; import org.tensorflow.Output; import org.tensorflow.op.Op; +import org.tensorflow.op.Ops; import org.tensorflow.op.Scope; import org.tensorflow.op.annotation.Endpoint; import org.tensorflow.op.annotation.Operator; @@ -29,14 +30,14 @@ * An operator creating a constant initialized with zeros of the shape given by `dims`. * *

          For example, the following expression - *

          {@code ops.zeros(ops.constant(new long[]{2, 2}), Float.class)
          + *
          {@code tf.zeros(tf.val(shape), TFloat32.DTYPE)
          * is the equivalent of - *
          {@code ops.fill(ops.constant(new long[]{2, 2}), ops.constant(0.0f))
          + *
          {@code tf.fill(tf.val(shape), tf.val(0.0f))
          * * @param constant type */ @Operator -public class Zeros implements Op, Operand { +public final class Zeros implements Op, Operand { /** * Creates a zeroed tensor given its type and shape. diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBfloat16.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBfloat16.java index 9d1e8a0cbbd..22b73eab18c 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBfloat16.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBfloat16.java @@ -17,6 +17,7 @@ package org.tensorflow.types; +import java.util.function.Consumer; import org.tensorflow.DataType; import org.tensorflow.Tensor; import org.tensorflow.internal.buffer.TensorBuffers; @@ -26,6 +27,7 @@ import org.tensorflow.tools.buffer.layout.DataLayouts; import org.tensorflow.tools.ndarray.FloatNdArray; import org.tensorflow.tools.ndarray.NdArray; +import org.tensorflow.tools.ndarray.StdArrays; import org.tensorflow.tools.ndarray.impl.dense.FloatDenseNdArray; import org.tensorflow.types.family.TNumber; @@ -56,7 +58,7 @@ public interface TBfloat16 extends FloatNdArray, TNumber { * @return the new tensor */ static Tensor scalarOf(float value) { - return Tensor.allocate(DTYPE, Shape.scalar(), data -> data.setFloat(value)); + return Tensor.of(DTYPE, Shape.scalar(), data -> data.setFloat(value)); } /** @@ -66,41 +68,44 @@ static Tensor scalarOf(float value) { * @return the new tensor */ static Tensor vectorOf(float... values) { - return Tensor.allocate(DTYPE, Shape.make(values.length), data -> data.write(values)); + if (values == null) { + throw new IllegalArgumentException(); + } + return Tensor.of(DTYPE, Shape.of(values.length), data -> StdArrays.copyTo(data, values)); } /** - * Allocates a new tensor of the given shape. + * Allocates a new tensor which is a copy of a given array of floats. * - * @param shape shape of the tensor to allocate + *

          The tensor will have the same shape as the source array and its data will be copied. + * + * @param src the source array giving the shape and data to the new tensor * @return the new tensor */ - static Tensor ofShape(Shape shape) { - return Tensor.allocate(DTYPE, shape); + static Tensor tensorOf(NdArray src) { + return Tensor.of(DTYPE, src.shape(), src::copyTo); } /** * Allocates a new tensor of the given shape. * - *

          Invoking {@code ofShape(x, y, z)} is equivalent to {@code ofShape(Shape.make(x, y, z))} - * - * @param dimensionSizes dimension sizes that defines the shape of the tensor to allocate + * @param shape shape of the tensor to allocate * @return the new tensor */ - static Tensor ofShape(long... dimensionSizes) { - return Tensor.allocate(DTYPE, Shape.make(dimensionSizes)); + static Tensor tensorOf(Shape shape) { + return Tensor.of(DTYPE, shape); } /** - * Allocates a new tensor which is a copy of a given array of floats. - * - *

          The tensor will have the same shape as the source array and its data will be copied. + * Allocates a new tensor of the given shape and initialize its data. * - * @param src the source array giving the shape and data to the new tensor + * @param shape shape of the tensor to allocate + * @param dataInit tensor data initializer * @return the new tensor + * @throws org.tensorflow.TensorFlowException if the tensor cannot be allocated or initialized */ - static Tensor copyOf(NdArray src) { - return Tensor.allocate(DTYPE, src.shape(), src::copyTo); + static Tensor tensorOf(Shape shape, Consumer dataInit) { + return Tensor.of(DTYPE, shape, dataInit); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBool.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBool.java index 4ab13075762..734105843d0 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBool.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TBool.java @@ -17,6 +17,7 @@ package org.tensorflow.types; +import java.util.function.Consumer; import org.tensorflow.DataType; import org.tensorflow.Tensor; import org.tensorflow.internal.buffer.TensorBuffers; @@ -25,6 +26,7 @@ import org.tensorflow.tools.buffer.BooleanDataBuffer; import org.tensorflow.tools.ndarray.BooleanNdArray; import org.tensorflow.tools.ndarray.NdArray; +import org.tensorflow.tools.ndarray.StdArrays; import org.tensorflow.tools.ndarray.impl.dense.BooleanDenseNdArray; import org.tensorflow.types.family.TType; @@ -48,7 +50,7 @@ public interface TBool extends BooleanNdArray, TType { * @return the new tensor */ static Tensor scalarOf(boolean value) { - return Tensor.allocate(DTYPE, Shape.scalar(), data -> data.setBoolean(value)); + return Tensor.of(DTYPE, Shape.scalar(), data -> data.setBoolean(value)); } /** @@ -58,41 +60,44 @@ static Tensor scalarOf(boolean value) { * @return the new tensor */ static Tensor 
vectorOf(boolean... values) { - return Tensor.allocate(DTYPE, Shape.make(values.length), data -> data.write(values)); + if (values == null) { + throw new IllegalArgumentException(); + } + return Tensor.of(DTYPE, Shape.of(values.length), data -> StdArrays.copyTo(data, values)); } /** - * Allocates a new tensor of the given shape. + * Allocates a new tensor which is a copy of a given array of booleans. * - * @param shape shape of the tensor to allocate + *

          The tensor will have the same shape as the source array and its data will be copied. + * + * @param src the source array giving the shape and data to the new tensor * @return the new tensor */ - static Tensor ofShape(Shape shape) { - return Tensor.allocate(DTYPE, shape); + static Tensor tensorOf(NdArray src) { + return Tensor.of(DTYPE, src.shape(), src::copyTo); } /** * Allocates a new tensor of the given shape. * - *

          Invoking {@code ofShape(x, y, z)} is equivalent to {@code ofShape(Shape.make(x, y, z))} - * - * @param dimensionSizes dimension sizes that defines the shape of the tensor to allocate + * @param shape shape of the tensor to allocate * @return the new tensor */ - static Tensor ofShape(long... dimensionSizes) { - return Tensor.allocate(DTYPE, Shape.make(dimensionSizes)); + static Tensor tensorOf(Shape shape) { + return Tensor.of(DTYPE, shape); } /** - * Allocates a new tensor which is a copy of a given array of booleans. - * - *

          The tensor will have the same shape as the source array and its data will be copied. + * Allocates a new tensor of the given shape and initialize its data. * - * @param src the source array giving the shape and data to the new tensor + * @param shape shape of the tensor to allocate + * @param dataInit tensor data initializer * @return the new tensor + * @throws org.tensorflow.TensorFlowException if the tensor cannot be allocated or initialized */ - static Tensor copyOf(NdArray src) { - return Tensor.allocate(DTYPE, src.shape(), src::copyTo); + static Tensor tensorOf(Shape shape, Consumer dataInit) { + return Tensor.of(DTYPE, shape, dataInit); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat16.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat16.java index 7abda3381ef..91d3eb36d51 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat16.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat16.java @@ -17,6 +17,7 @@ package org.tensorflow.types; +import java.util.function.Consumer; import org.tensorflow.DataType; import org.tensorflow.Tensor; import org.tensorflow.internal.buffer.TensorBuffers; @@ -26,6 +27,7 @@ import org.tensorflow.tools.buffer.layout.DataLayouts; import org.tensorflow.tools.ndarray.FloatNdArray; import org.tensorflow.tools.ndarray.NdArray; +import org.tensorflow.tools.ndarray.StdArrays; import org.tensorflow.tools.ndarray.impl.dense.FloatDenseNdArray; import org.tensorflow.types.family.TNumber; @@ -53,7 +55,7 @@ public interface TFloat16 extends FloatNdArray, TNumber { * @return the new tensor */ static Tensor scalarOf(float value) { - return Tensor.allocate(DTYPE, Shape.scalar(), data -> data.setFloat(value)); + return Tensor.of(DTYPE, Shape.scalar(), data -> data.setFloat(value)); } /** @@ -63,41 +65,44 @@ static Tensor scalarOf(float value) { * @return the new tensor */ static Tensor 
vectorOf(float... values) { - return Tensor.allocate(DTYPE, Shape.make(values.length), data -> data.write(values)); + if (values == null) { + throw new IllegalArgumentException(); + } + return Tensor.of(DTYPE, Shape.of(values.length), data -> StdArrays.copyTo(data, values)); } /** - * Allocates a new tensor of the given shape. + * Allocates a new tensor which is a copy of a given array of floats. * - * @param shape shape of the tensor to allocate + *

          The tensor will have the same shape as the source array and its data will be copied. + * + * @param src the source array giving the shape and data to the new tensor * @return the new tensor */ - static Tensor ofShape(Shape shape) { - return Tensor.allocate(DTYPE, shape); + static Tensor tensorOf(NdArray src) { + return Tensor.of(DTYPE, src.shape(), src::copyTo); } /** * Allocates a new tensor of the given shape. * - *

          Invoking {@code ofShape(x, y, z)} is equivalent to {@code ofShape(Shape.make(x, y, z))} - * - * @param dimensionSizes dimension sizes that defines the shape of the tensor to allocate + * @param shape shape of the tensor to allocate * @return the new tensor */ - static Tensor ofShape(long... dimensionSizes) { - return Tensor.allocate(DTYPE, Shape.make(dimensionSizes)); + static Tensor tensorOf(Shape shape) { + return Tensor.of(DTYPE, shape); } /** - * Allocates a new tensor which is a copy of a given array of floats. - * - *

          The tensor will have the same shape as the source array and its data will be copied. + * Allocates a new tensor of the given shape and initialize its data. * - * @param src the source array giving the shape and data to the new tensor + * @param shape shape of the tensor to allocate + * @param dataInit tensor data initializer * @return the new tensor + * @throws org.tensorflow.TensorFlowException if the tensor cannot be allocated or initialized */ - static Tensor copyOf(NdArray src) { - return Tensor.allocate(DTYPE, src.shape(), src::copyTo); + static Tensor tensorOf(Shape shape, Consumer dataInit) { + return Tensor.of(DTYPE, shape, dataInit); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat32.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat32.java index f64d8465dcc..4e31fd87c8a 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat32.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat32.java @@ -17,6 +17,7 @@ package org.tensorflow.types; +import java.util.function.Consumer; import org.tensorflow.DataType; import org.tensorflow.Tensor; import org.tensorflow.internal.buffer.TensorBuffers; @@ -25,6 +26,7 @@ import org.tensorflow.tools.buffer.FloatDataBuffer; import org.tensorflow.tools.ndarray.FloatNdArray; import org.tensorflow.tools.ndarray.NdArray; +import org.tensorflow.tools.ndarray.StdArrays; import org.tensorflow.tools.ndarray.impl.dense.FloatDenseNdArray; import org.tensorflow.types.family.TNumber; @@ -43,7 +45,7 @@ public interface TFloat32 extends FloatNdArray, TNumber { * @return the new tensor */ static Tensor scalarOf(float value) { - return Tensor.allocate(DTYPE, Shape.scalar(), data -> data.setFloat(value)); + return Tensor.of(DTYPE, Shape.scalar(), data -> data.setFloat(value)); } /** @@ -53,41 +55,44 @@ static Tensor scalarOf(float value) { * @return the new tensor */ static Tensor 
vectorOf(float... values) { - return Tensor.allocate(DTYPE, Shape.make(values.length), data -> data.write(values)); + if (values == null) { + throw new IllegalArgumentException(); + } + return Tensor.of(DTYPE, Shape.of(values.length), data -> StdArrays.copyTo(data, values)); } /** - * Allocates a new tensor of the given shape. + * Allocates a new tensor which is a copy of a given array of floats. * - * @param shape shape of the tensor to allocate + *

          The tensor will have the same shape as the source array and its data will be copied. + * + * @param src the source array giving the shape and data to the new tensor * @return the new tensor */ - static Tensor ofShape(Shape shape) { - return Tensor.allocate(DTYPE, shape); + static Tensor tensorOf(NdArray src) { + return Tensor.of(DTYPE, src.shape(), src::copyTo); } /** * Allocates a new tensor of the given shape. * - *

          Invoking {@code ofShape(x, y, z)} is equivalent to {@code ofShape(Shape.make(x, y, z))} - * - * @param dimensionSizes dimension sizes that defines the shape of the tensor to allocate + * @param shape shape of the tensor to allocate * @return the new tensor */ - static Tensor ofShape(long... dimensionSizes) { - return Tensor.allocate(DTYPE, Shape.make(dimensionSizes)); + static Tensor tensorOf(Shape shape) { + return Tensor.of(DTYPE, shape); } /** - * Allocates a new tensor which is a copy of a given array of floats. - * - *

          The tensor will have the same shape as the source array and its data will be copied. + * Allocates a new tensor of the given shape and initialize its data. * - * @param src the source array giving the shape and data to the new tensor + * @param shape shape of the tensor to allocate + * @param dataInit tensor data initializer * @return the new tensor + * @throws org.tensorflow.TensorFlowException if the tensor cannot be allocated or initialized */ - static Tensor copyOf(NdArray src) { - return Tensor.allocate(DTYPE, src.shape(), src::copyTo); + static Tensor tensorOf(Shape shape, Consumer dataInit) { + return Tensor.of(DTYPE, shape, dataInit); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat64.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat64.java index 246af037951..8696a1eae06 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat64.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TFloat64.java @@ -17,6 +17,7 @@ package org.tensorflow.types; +import java.util.function.Consumer; import org.tensorflow.DataType; import org.tensorflow.Tensor; import org.tensorflow.internal.buffer.TensorBuffers; @@ -25,6 +26,7 @@ import org.tensorflow.tools.buffer.DoubleDataBuffer; import org.tensorflow.tools.ndarray.DoubleNdArray; import org.tensorflow.tools.ndarray.NdArray; +import org.tensorflow.tools.ndarray.StdArrays; import org.tensorflow.tools.ndarray.impl.dense.DoubleDenseNdArray; import org.tensorflow.types.family.TNumber; @@ -43,7 +45,7 @@ public interface TFloat64 extends DoubleNdArray, TNumber { * @return the new tensor */ static Tensor scalarOf(double value) { - return Tensor.allocate(DTYPE, Shape.scalar(), data -> data.setDouble(value)); + return Tensor.of(DTYPE, Shape.scalar(), data -> data.setDouble(value)); } /** @@ -53,41 +55,44 @@ static Tensor scalarOf(double value) { * @return the new tensor */ static 
Tensor vectorOf(double... values) { - return Tensor.allocate(DTYPE, Shape.make(values.length), data -> data.write(values)); + if (values == null) { + throw new IllegalArgumentException(); + } + return Tensor.of(DTYPE, Shape.of(values.length), data -> StdArrays.copyTo(data, values)); } /** - * Allocates a new tensor of the given shape. + * Allocates a new tensor which is a copy of a given array of doubles. * - * @param shape shape of the tensor to allocate + *

          The tensor will have the same shape as the source array and its data will be copied. + * + * @param src the source array giving the shape and data to the new tensor * @return the new tensor */ - static Tensor ofShape(Shape shape) { - return Tensor.allocate(DTYPE, shape); + static Tensor tensorOf(NdArray src) { + return Tensor.of(DTYPE, src.shape(), src::copyTo); } /** * Allocates a new tensor of the given shape. * - *

          Invoking {@code ofShape(x, y, z)} is equivalent to {@code ofShape(Shape.make(x, y, z))} - * - * @param dimensionSizes dimension sizes that defines the shape of the tensor to allocate + * @param shape shape of the tensor to allocate * @return the new tensor */ - static Tensor ofShape(long... dimensionSizes) { - return Tensor.allocate(DTYPE, Shape.make(dimensionSizes)); + static Tensor tensorOf(Shape shape) { + return Tensor.of(DTYPE, shape); } /** - * Allocates a new tensor which is a copy of a given array of doubles. - * - *

          The tensor will have the same shape as the source array and its data will be copied. + * Allocates a new tensor of the given shape and initialize its data. * - * @param src the source array giving the shape and data to the new tensor + * @param shape shape of the tensor to allocate + * @param dataInit tensor data initializer * @return the new tensor + * @throws org.tensorflow.TensorFlowException if the tensor cannot be allocated or initialized */ - static Tensor copyOf(NdArray src) { - return Tensor.allocate(DTYPE, src.shape(), src::copyTo); + static Tensor tensorOf(Shape shape, Consumer dataInit) { + return Tensor.of(DTYPE, shape, dataInit); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt32.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt32.java index 6294391177d..ae3c8fefaea 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt32.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt32.java @@ -17,6 +17,7 @@ package org.tensorflow.types; +import java.util.function.Consumer; import org.tensorflow.DataType; import org.tensorflow.Tensor; import org.tensorflow.internal.buffer.TensorBuffers; @@ -25,6 +26,7 @@ import org.tensorflow.tools.buffer.IntDataBuffer; import org.tensorflow.tools.ndarray.IntNdArray; import org.tensorflow.tools.ndarray.NdArray; +import org.tensorflow.tools.ndarray.StdArrays; import org.tensorflow.tools.ndarray.impl.dense.IntDenseNdArray; import org.tensorflow.types.family.TNumber; @@ -43,7 +45,7 @@ public interface TInt32 extends IntNdArray, TNumber { * @return the new tensor */ static Tensor scalarOf(int value) { - return Tensor.allocate(DTYPE, Shape.scalar(), data -> data.setInt(value)); + return Tensor.of(DTYPE, Shape.scalar(), data -> data.setInt(value)); } /** @@ -51,43 +53,47 @@ static Tensor scalarOf(int value) { * * @param values ints to store in the new tensor * @return the new tensor 
+ * @throws IllegalArgumentException if no values are provided */ static Tensor vectorOf(int... values) { - return Tensor.allocate(DTYPE, Shape.make(values.length), data -> data.write(values)); + if (values == null) { + throw new IllegalArgumentException(); + } + return Tensor.of(DTYPE, Shape.of(values.length), data -> StdArrays.copyTo(data, values)); } /** - * Allocates a new tensor of the given shape. + * Allocates a new tensor which is a copy of a given array of ints. * - * @param shape shape of the tensor to allocate + *

          The tensor will have the same shape as the source array and its data will be copied. + * + * @param src the source array giving the shape and data to the new tensor * @return the new tensor */ - static Tensor ofShape(Shape shape) { - return Tensor.allocate(DTYPE, shape); + static Tensor tensorOf(NdArray src) { + return Tensor.of(DTYPE, src.shape(), src::copyTo); } /** * Allocates a new tensor of the given shape. * - *

          Invoking {@code ofShape(x, y, z)} is equivalent to {@code ofShape(Shape.make(x, y, z))} - * - * @param dimensionSizes dimension sizes that defines the shape of the tensor to allocate + * @param shape shape of the tensor to allocate * @return the new tensor */ - static Tensor ofShape(long... dimensionSizes) { - return Tensor.allocate(DTYPE, Shape.make(dimensionSizes)); + static Tensor tensorOf(Shape shape) { + return Tensor.of(DTYPE, shape); } /** - * Allocates a new tensor which is a copy of a given array of ints. - * - *

          The tensor will have the same shape as the source array and its data will be copied. + * Allocates a new tensor of the given shape and initialize its data. * - * @param src the source array giving the shape and data to the new tensor + * @param shape shape of the tensor to allocate + * @param dataInit tensor data initializer * @return the new tensor + * @throws org.tensorflow.TensorFlowException if the tensor cannot be allocated or initialized */ - static Tensor copyOf(NdArray src) { - return Tensor.allocate(DTYPE, src.shape(), src::copyTo); + static Tensor tensorOf(Shape shape, Consumer dataInit) { + return Tensor.of(DTYPE, shape, dataInit); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt64.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt64.java index 4968295786c..5e47ec5847f 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt64.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TInt64.java @@ -17,6 +17,7 @@ package org.tensorflow.types; +import java.util.function.Consumer; import org.tensorflow.DataType; import org.tensorflow.Tensor; import org.tensorflow.internal.buffer.TensorBuffers; @@ -25,6 +26,7 @@ import org.tensorflow.tools.buffer.LongDataBuffer; import org.tensorflow.tools.ndarray.LongNdArray; import org.tensorflow.tools.ndarray.NdArray; +import org.tensorflow.tools.ndarray.StdArrays; import org.tensorflow.tools.ndarray.impl.dense.LongDenseNdArray; import org.tensorflow.types.family.TNumber; @@ -43,7 +45,7 @@ public interface TInt64 extends LongNdArray, TNumber { * @return the new tensor */ static Tensor scalarOf(long value) { - return Tensor.allocate(DTYPE, Shape.scalar(), data -> data.setLong(value)); + return Tensor.of(DTYPE, Shape.scalar(), data -> data.setLong(value)); } /** @@ -53,41 +55,44 @@ static Tensor scalarOf(long value) { * @return the new tensor */ static Tensor vectorOf(long... 
values) { - return Tensor.allocate(DTYPE, Shape.make(values.length), data -> data.write(values)); + if (values == null) { + throw new IllegalArgumentException(); + } + return Tensor.of(DTYPE, Shape.of(values.length), data -> StdArrays.copyTo(data, values)); } /** - * Allocates a new tensor of the given shape. + * Allocates a new tensor which is a copy of a given array of longs. * - * @param shape shape of the tensor to allocate + *

          The tensor will have the same shape as the source array and its data will be copied. + * + * @param src the source array giving the shape and data to the new tensor * @return the new tensor */ - static Tensor ofShape(Shape shape) { - return Tensor.allocate(DTYPE, shape); + static Tensor tensorOf(NdArray src) { + return Tensor.of(DTYPE, src.shape(), src::copyTo); } /** * Allocates a new tensor of the given shape. * - *

          Invoking {@code ofShape(x, y, z)} is equivalent to {@code ofShape(Shape.make(x, y, z))} - * - * @param dimensionSizes dimension sizes that defines the shape of the tensor to allocate + * @param shape shape of the tensor to allocate * @return the new tensor */ - static Tensor ofShape(long... dimensionSizes) { - return Tensor.allocate(DTYPE, Shape.make(dimensionSizes)); + static Tensor tensorOf(Shape shape) { + return Tensor.of(DTYPE, shape); } /** - * Allocates a new tensor which is a copy of a given array of longs. - * - *

          The tensor will have the same shape as the source array and its data will be copied. + * Allocates a new tensor of the given shape and initialize its data. * - * @param src the source array giving the shape and data to the new tensor + * @param shape shape of the tensor to allocate + * @param dataInit tensor data initializer * @return the new tensor + * @throws org.tensorflow.TensorFlowException if the tensor cannot be allocated or initialized */ - static Tensor copyOf(NdArray src) { - return Tensor.allocate(DTYPE, src.shape(), src::copyTo); + static Tensor tensorOf(Shape shape, Consumer dataInit) { + return Tensor.of(DTYPE, shape, dataInit); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java index db6f8251b95..15b492b5ed7 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java @@ -59,7 +59,7 @@ public interface TString extends NdArray, TType { * @return the new tensor */ static Tensor scalarOf(String value) { - return copyOf(NdArrays.scalarOfObject(value)); + return tensorOf(NdArrays.scalarOfObject(value)); } /** @@ -71,7 +71,10 @@ static Tensor scalarOf(String value) { * @return the new tensor */ static Tensor vectorOf(String... values) { - return copyOf(NdArrays.vectorOfObjects(values)); + if (values == null) { + throw new IllegalArgumentException(); + } + return tensorOf(NdArrays.vectorOfObjects(values)); } /** @@ -83,8 +86,8 @@ static Tensor vectorOf(String... 
values) { * @param src the source array giving the shape and data to the new tensor * @return the new tensor */ - static Tensor copyOf(NdArray src) { - return copyOf(StandardCharsets.UTF_8, src); + static Tensor tensorOf(NdArray src) { + return tensorOf(StandardCharsets.UTF_8, src); } /** @@ -94,13 +97,13 @@ static Tensor copyOf(NdArray src) { * strings are encoded into bytes using the charset passed in parameter. * *

          If charset is different than default UTF-8, then it must also be provided explicitly - * when reading data from the tensor, using {@link #use(Charset)}:

          + * when reading data from the tensor, using {@link #using(Charset)}:

          * *
          {@code
              * // Given `originalStrings` an initialized vector of strings
          -   * Tensor tensor = TString.copyOf(Charsets.UTF_16, originalStrings);
          +   * Tensor tensor = TString.tensorOf(Charsets.UTF_16, originalStrings);
              * ...
          -   * TString tensorStrings = tensor.data().use(Charsets.UTF_16);
          +   * TString tensorStrings = tensor.data().using(Charsets.UTF_16);
              * assertEquals(originalStrings.getObject(0), tensorStrings.getObject(0));
              * }
          * @@ -108,7 +111,7 @@ static Tensor copyOf(NdArray src) { * @param src the source array giving the shape and data to the new tensor * @return the new tensor */ - static Tensor copyOf(Charset charset, NdArray src) { + static Tensor tensorOf(Charset charset, NdArray src) { return TStringImpl.createTensor(src, s -> s.getBytes(charset)); } @@ -127,7 +130,7 @@ static Tensor copyOf(Charset charset, NdArray src) { * @param src the source array giving the shape and data to the new tensor * @return the new tensor */ - static Tensor copyOfBytes(NdArray src) { + static Tensor tensorOfBytes(NdArray src) { return TStringImpl.createTensor(src, Function.identity()); } @@ -139,15 +142,15 @@ static Tensor copyOfBytes(NdArray src) { * *
          {@code
              * Tensor tensor =
          -   *    TString.copyOf(StandardCharsets.UTF_16, NdArrays.scalarOfObject("TensorFlow");
          +   *    TString.tensorOf(StandardCharsets.UTF_16, NdArrays.scalarOfObject("TensorFlow");
              *
          -   * assertEquals("TensorFlow", tensor.data().use(StandardCharsets.UTF_16).getObject());
          +   * assertEquals("TensorFlow", tensor.data().using(StandardCharsets.UTF_16).getObject());
              * }
          * * @param charset charset to use * @return string tensor data using this charset */ - TString use(Charset charset); + TString using(Charset charset); /** * @return the tensor data as a n-dimensional array of raw byte sequences. @@ -161,18 +164,18 @@ static Tensor copyOfBytes(NdArray src) { class TStringImpl extends DenseNdArray implements TString { @Override - public TString use(Charset charset) { + public TString using(Charset charset) { return new TStringImpl(tensorBuffer, DataLayouts.ofStrings(charset), shape()); } @Override public NdArray asBytes() { - return NdArrays.wrap(tensorBuffer, shape()); + return NdArrays.wrap(shape(), tensorBuffer); } static Tensor createTensor(NdArray src, Function getBytes) { long size = StringTensorBuffer.computeSize(src, getBytes); - return Tensor.allocate(TString.DTYPE, src.shape(), size, data -> + return Tensor.of(TString.DTYPE, src.shape(), size, data -> ((TStringImpl)data).tensorBuffer.init(src, getBytes) ); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint8.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint8.java index 90b544730a1..85c55f7a2e4 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint8.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TUint8.java @@ -17,6 +17,7 @@ package org.tensorflow.types; +import java.util.function.Consumer; import org.tensorflow.DataType; import org.tensorflow.Tensor; import org.tensorflow.internal.buffer.TensorBuffers; @@ -25,6 +26,7 @@ import org.tensorflow.tools.buffer.ByteDataBuffer; import org.tensorflow.tools.ndarray.ByteNdArray; import org.tensorflow.tools.ndarray.NdArray; +import org.tensorflow.tools.ndarray.StdArrays; import org.tensorflow.tools.ndarray.impl.dense.ByteDenseNdArray; import org.tensorflow.types.family.TNumber; @@ -43,7 +45,7 @@ public interface TUint8 extends ByteNdArray, TNumber { * @return the new tensor */ static 
Tensor scalarOf(byte value) { - return Tensor.allocate(DTYPE, Shape.scalar(), data -> data.setByte(value)); + return Tensor.of(DTYPE, Shape.scalar(), data -> data.setByte(value)); } /** @@ -53,41 +55,44 @@ static Tensor scalarOf(byte value) { * @return the new tensor */ static Tensor vectorOf(byte... values) { - return Tensor.allocate(DTYPE, Shape.make(values.length), data -> data.write(values)); + if (values == null) { + throw new IllegalArgumentException(); + } + return Tensor.of(DTYPE, Shape.of(values.length), data -> StdArrays.copyTo(data, values)); } /** - * Allocates a new tensor of the given shape. + * Allocates a new tensor which is a copy of a given array of bytes. * - * @param shape shape of the tensor to allocate + *

          The tensor will have the same shape as the source array and its data will be copied. + * + * @param src the source array giving the shape and data to the new tensor * @return the new tensor */ - static Tensor ofShape(Shape shape) { - return Tensor.allocate(DTYPE, shape); + static Tensor tensorOf(NdArray src) { + return Tensor.of(DTYPE, src.shape(), src::copyTo); } /** * Allocates a new tensor of the given shape. * - *

          Invoking {@code ofShape(x, y, z)} is equivalent to {@code ofShape(Shape.make(x, y, z))} - * - * @param dimensionSizes dimension sizes that defines the shape of the tensor to allocate + * @param shape shape of the tensor to allocate * @return the new tensor */ - static Tensor ofShape(long... dimensionSizes) { - return Tensor.allocate(DTYPE, Shape.make(dimensionSizes)); + static Tensor tensorOf(Shape shape) { + return Tensor.of(DTYPE, shape); } /** - * Allocates a new tensor which is a copy of a given array of bytes. - * - *

          The tensor will have the same shape as the source array and its data will be copied. + * Allocates a new tensor of the given shape and initialize its data. * - * @param src the source array giving the shape and data to the new tensor + * @param shape shape of the tensor to allocate + * @param dataInit tensor data initializer * @return the new tensor + * @throws org.tensorflow.TensorFlowException if the tensor cannot be allocated or initialized */ - static Tensor copyOf(NdArray src) { - return Tensor.allocate(DTYPE, src.shape(), src::copyTo); + static Tensor tensorOf(Shape shape, Consumer dataInit) { + return Tensor.of(DTYPE, shape, dataInit); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/package-info.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/package-info.java index bdce85453e3..c6d0e45d064 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/package-info.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/package-info.java @@ -35,8 +35,8 @@ * {@link org.tensorflow.Tensor#data() Tensor.data()}. * *

          Note that while it is always possible to allocate a tensor using the - * {@link org.tensorflow.Tensor#allocate(org.tensorflow.DataType, org.tensorflow.tools.Shape) Tensor.allocate(...)} + * {@link org.tensorflow.Tensor#of(org.tensorflow.DataType, org.tensorflow.tools.Shape) Tensor.of(...)} * method, most tensor types expose factory methods that simplify the creation process, like - * {@code scalarOf(...)}, {@code vectorOf(...)}, {@code ofShape(...)}, etc. + * {@code scalarOf(...)}, {@code vectorOf(...)}, {@code tensorOf(...)}, etc. */ package org.tensorflow.types; diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationBuilderTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationBuilderTest.java index efa4d762bc6..6b7da570249 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationBuilderTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationBuilderTest.java @@ -119,12 +119,12 @@ public void setAttrs() { // shape opBuilder(session, "EnsureShape", "ShapeAttr") .addInput(TestUtil.constant(session, "Const", new int[2][2])) - .setAttr("shape", Shape.make(2, 2)) + .setAttr("shape", Shape.of(2, 2)) .build(); // list(shape) opBuilder(session, "FIFOQueue", "queue") .setAttr("component_types", new DataType[] {TInt32.DTYPE, TInt32.DTYPE}) - .setAttr("shapes", new Shape[] {Shape.make(2, 2), Shape.make(2, 2, 2)}) + .setAttr("shapes", new Shape[] {Shape.of(2, 2), Shape.of(2, 2, 2)}) .build(); // bool opBuilder(session, "All", "Bool") diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerSessionTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerSessionTest.java index b9c755e544e..b9133b64c61 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerSessionTest.java +++ 
b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerSessionTest.java @@ -133,6 +133,7 @@ public void addingReferenceToClosedSessionFails() { @Test public void defaultSession() throws Exception { + EagerSession.closeDefaultForTest(); EagerSession.Options options = EagerSession.options().resourceCleanupStrategy(ResourceCleanupStrategy.ON_SESSION_CLOSE); EagerSession.initDefault(options); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java index 32da807c520..c7bbc2ba9b9 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java @@ -143,7 +143,7 @@ public void setAttrShape() { n = g.opBuilder("Placeholder", "batch_of_vectors") .setAttr("dtype", TFloat32.DTYPE) - .setAttr("shape", Shape.make(-1, 784)) + .setAttr("shape", Shape.of(-1, 784)) .build() .output(0); assertEquals(2, n.shape().numDimensions()); @@ -156,10 +156,10 @@ public void setAttrShape() { @Test public void setAttrShapeList() { // Those shapes match tensors ones, so no exception is thrown - testSetAttrShapeList(new Shape[] {Shape.make(2, 2), Shape.make(2, 2, 2)}); + testSetAttrShapeList(new Shape[] {Shape.of(2, 2), Shape.of(2, 2, 2)}); try { // Those shapes do not match tensors ones, exception is thrown - testSetAttrShapeList(new Shape[] {Shape.make(2, 2), Shape.make(2, 2, 2, 2)}); + testSetAttrShapeList(new Shape[] {Shape.of(2, 2), Shape.of(2, 2, 2, 2)}); fail("Shapes are incompatible and an exception was expected"); } catch (IllegalArgumentException e) { // expected diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/benchmark/TensorBenchmark.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/benchmark/TensorBenchmark.java new file mode 
100644 index 00000000000..3d378734dfe --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/benchmark/TensorBenchmark.java @@ -0,0 +1,140 @@ +package org.tensorflow.benchmark; + +import static org.tensorflow.tools.ndarray.NdArrays.vectorOf; + +import java.io.IOException; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.runner.RunnerException; +import org.tensorflow.tools.Shape; +import org.tensorflow.tools.buffer.DataBuffers; +import org.tensorflow.tools.ndarray.IntNdArray; +import org.tensorflow.tools.ndarray.NdArrays; +import org.tensorflow.tools.ndarray.StdArrays; +import org.tensorflow.types.TInt32; + +@Fork(value = 1, jvmArgs = {"-Xms4G", "-Xmx4G"}) +@BenchmarkMode(Mode.AverageTime) +@Warmup(iterations = 3) +@Measurement(iterations = 5) +@State(Scope.Benchmark) +public class TensorBenchmark { + + public static void main(String[] args) throws IOException, RunnerException { + org.openjdk.jmh.Main.main(args); + } + + @Benchmark + @Measurement(batchSize = 1000) + public void initTensorByStdArrays() { + int[][][][] data = new int[][][][] { + { + { + {0, 0, 0}, {0, 0, 1}, {0, 0, 2} + }, + { + {0, 1, 0}, {0, 1, 1}, {0, 1, 2} + }, + { + {0, 2, 0}, {0, 2, 1}, {0, 2, 2} + } + }, { + { + {1, 0, 0}, {1, 0, 1}, {1, 0, 2} + }, + { + {1, 1, 0}, {1, 1, 1}, {1, 1, 2} + }, + { + {1, 2, 0}, {1, 2, 1}, {1, 2, 2} + } + }, { + { + {2, 0, 0}, {2, 0, 1}, {2, 0, 2} + }, + { + {2, 1, 0}, {2, 1, 1}, {2, 1, 2} + }, + { + {2, 2, 0}, {2, 2, 1}, {2, 2, 2} + } + } + }; + TInt32.tensorOf(StdArrays.shapeOf(data), d -> StdArrays.copyTo(d, data)); + } + + @Benchmark + @Measurement(batchSize = 1000) + public void initTensorByVectors() { 
+ TInt32.tensorOf(Shape.of(3, 3, 3, 3), d -> d + .set(vectorOf(0, 0, 0), 0, 0, 0) + .set(vectorOf(0, 0, 1), 0, 0, 1) + .set(vectorOf(0, 0, 2), 0, 0, 2) + .set(vectorOf(0, 1, 0), 0, 1, 0) + .set(vectorOf(0, 1, 1), 0, 1, 1) + .set(vectorOf(0, 1, 2), 0, 1, 2) + .set(vectorOf(0, 2, 0), 0, 2, 0) + .set(vectorOf(0, 2, 1), 0, 2, 1) + .set(vectorOf(0, 2, 2), 0, 2, 2) + .set(vectorOf(1, 0, 0), 1, 0, 0) + .set(vectorOf(1, 0, 1), 1, 0, 1) + .set(vectorOf(1, 0, 2), 1, 0, 2) + .set(vectorOf(1, 1, 0), 1, 1, 0) + .set(vectorOf(1, 1, 1), 1, 1, 1) + .set(vectorOf(1, 1, 2), 1, 1, 2) + .set(vectorOf(1, 2, 0), 1, 2, 0) + .set(vectorOf(1, 2, 1), 1, 2, 1) + .set(vectorOf(1, 2, 2), 1, 2, 2) + .set(vectorOf(2, 0, 0), 2, 0, 0) + .set(vectorOf(2, 0, 1), 2, 0, 1) + .set(vectorOf(2, 0, 2), 2, 0, 2) + .set(vectorOf(2, 1, 0), 2, 1, 0) + .set(vectorOf(2, 1, 1), 2, 1, 1) + .set(vectorOf(2, 1, 2), 2, 1, 2) + .set(vectorOf(2, 2, 0), 2, 2, 0) + .set(vectorOf(2, 2, 1), 2, 2, 1) + .set(vectorOf(2, 2, 2), 2, 2, 2) + ); + } + + @Benchmark + @Measurement(batchSize = 1000) + public void initTensorByFlatArray() { + IntNdArray data = NdArrays.wrap(Shape.of(3, 3, 3, 3), DataBuffers.of( + 0, 0, 0, + 0, 0, 1, + 0, 0, 2, + 0, 1, 0, + 0, 1, 1, + 0, 1, 2, + 0, 2, 0, + 0, 2, 1, + 0, 2, 2, + 1, 0, 0, + 1, 0, 1, + 1, 0, 2, + 1, 1, 0, + 1, 1, 1, + 1, 1, 2, + 1, 2, 0, + 1, 2, 1, + 1, 2, 2, + 2, 0, 0, + 2, 0, 1, + 2, 0, 2, + 2, 1, 0, + 2, 1, 1, + 2, 1, 2, + 2, 2, 0, + 2, 2, 1, + 2, 2, 2 + )); + TInt32.tensorOf(data); + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ConstantTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ConstantTest.java index c9243280361..8bf16beacac 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ConstantTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ConstantTest.java @@ -16,7 +16,6 @@ package org.tensorflow.op.core; import static 
org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; @@ -34,9 +33,8 @@ import org.tensorflow.Session; import org.tensorflow.Tensor; import org.tensorflow.op.Scope; -import org.tensorflow.types.TBool; -import org.tensorflow.types.TFloat64; import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -44,20 +42,6 @@ @RunWith(JUnit4.class) public class ConstantTest { private static final float EPSILON = 1e-7f; - - @Test - public void createInt() { - int value = 1; - - try (Graph g = new Graph(); - Session sess = new Session(g)) { - Scope scope = new Scope(g); - Constant op = Constant.create(scope, value); - try (Tensor result = sess.runner().fetch(op).run().get(0).expect(TInt32.DTYPE)) { - assertEquals(value, result.intValue()); - } - } - } @Test public void createIntBuffer() { @@ -75,20 +59,6 @@ public void createIntBuffer() { } } - @Test - public void createTFloat() { - float value = 1; - - try (Graph g = new Graph(); - Session sess = new Session(g)) { - Scope scope = new Scope(g); - Constant op = Constant.create(scope, value); - try (Tensor result = sess.runner().fetch(op).run().get(0)) { - assertEquals(value, result.expect(TFloat32.DTYPE).floatValue(), 0.0f); - } - } - } - @Test public void createTFloatBuffer() { float[] floats = {1, 2, 3, 4}; @@ -105,20 +75,6 @@ public void createTFloatBuffer() { } } - @Test - public void createTDouble() { - double value = 1; - - try (Graph g = new Graph(); - Session sess = new Session(g)) { - Scope scope = new Scope(g); - Constant op = Constant.create(scope, value); - try (Tensor result = sess.runner().fetch(op).run().get(0)) { - assertEquals(value, result.expect(TFloat64.DTYPE).doubleValue(), 0.0); - } - } - } - @Test public void createTDoubleBuffer() { double[] doubles = {1, 2, 3, 4}; @@ 
-135,20 +91,6 @@ public void createTDoubleBuffer() { } } - @Test - public void createLong() { - long value = 1; - - try (Graph g = new Graph(); - Session sess = new Session(g)) { - Scope scope = new Scope(g); - Constant op = Constant.create(scope, value); - try (Tensor result = sess.runner().fetch(op).run().get(0)) { - assertEquals(value, result.expect(TInt64.DTYPE).longValue()); - } - } - } - @Test public void createLongBuffer() { long[] longs = {1, 2, 3, 4}; @@ -165,20 +107,6 @@ public void createLongBuffer() { } } - @Test - public void createBoolean() { - boolean value = true; - - try (Graph g = new Graph(); - Session sess = new Session(g)) { - Scope scope = new Scope(g); - Constant op = Constant.create(scope, value); - try (Tensor result = sess.runner().fetch(op).run().get(0)) { - assertEquals(value, result.expect(TBool.DTYPE).booleanValue()); - } - } - } - @Test public void createStringBuffer() throws IOException { byte[] data = {(byte) 1, (byte) 2, (byte) 3, (byte) 4}; diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/GeneratedOperationsTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/GeneratedOperationsTest.java index 64337ede0dc..08004ad5fc1 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/GeneratedOperationsTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/GeneratedOperationsTest.java @@ -37,7 +37,7 @@ public void tensorInputTensorOutput() { try (Graph g = new Graph(); Session sess = new Session(g)) { Ops ops = Ops.create(g); - Operand x = ops.math.add(ops.constant(1), ops.constant(2)); + Operand x = ops.math.add(ops.val(1), ops.val(2)); try (Tensor result = sess.runner().fetch(x).run().get(0).expect(TInt32.DTYPE)) { assertEquals(3, result.intValue()); } @@ -50,9 +50,9 @@ public void testListInputTensorOutput() { Session sess = new Session(g)) { Ops ops = Ops.create(g); ArrayList> inputs = new ArrayList<>(); - 
inputs.add(ops.constant(1)); - inputs.add(ops.constant(2)); - inputs.add(ops.constant(3)); + inputs.add(ops.val(1)); + inputs.add(ops.val(2)); + inputs.add(ops.val(3)); Operand x = ops.math.addN(inputs); try (Tensor result = sess.runner().fetch(x).run().get(0).expect(TInt32.DTYPE)) { assertEquals(6, result.intValue()); @@ -73,11 +73,11 @@ public void testControlDependencies() { Session sess = new Session(g)) { Ops ops = Ops.create(g); Operand variable = ops.variable(Shape.scalar(), TInt32.DTYPE); - Operand initVariable = ops.assign(variable, ops.constant(0)); + Operand initVariable = ops.assign(variable, ops.val(0)); ArrayList> controls = new ArrayList<>(); - controls.add(ops.assign(variable, ops.constant(3))); + controls.add(ops.assign(variable, ops.val(3))); Operand x = - ops.withControlDependencies(controls).math.add(variable, ops.constant(0)); + ops.withControlDependencies(controls).math.add(variable, ops.val(0)); sess.runner().addTarget(initVariable).run(); try (Tensor result = sess.runner().fetch(x).run().get(0).expect(TInt32.DTYPE)) { assertEquals(3, result.intValue()); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java index 5a9584e3142..e0f06415dde 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/op/core/ZerosTest.java @@ -27,8 +27,8 @@ import org.tensorflow.Tensor; import org.tensorflow.op.Scope; import org.tensorflow.types.TBool; -import org.tensorflow.types.TFloat64; import org.tensorflow.types.TFloat32; +import org.tensorflow.types.TFloat64; import org.tensorflow.types.TInt32; import org.tensorflow.types.TInt64; import org.tensorflow.types.TString; @@ -44,7 +44,7 @@ public void createIntZeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros op = 
Zeros.create(scope, Constant.create(scope, shape), TInt32.DTYPE); + Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TInt32.DTYPE); try (Tensor result = sess.runner().fetch(op).run().get(0)) { int[][] actual = result.expect(TInt32.DTYPE).copyTo(new int[(int)shape[0]][(int)shape[1]]); for (int i = 0; i < actual.length; ++i) { @@ -62,7 +62,7 @@ public void createFloatZeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros op = Zeros.create(scope, Constant.create(scope, shape), TFloat32.DTYPE); + Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TFloat32.DTYPE); try (Tensor result = sess.runner().fetch(op.asOutput()).run().get(0)) { float[][] actual = result.expect(TFloat32.DTYPE).copyTo(new float[(int)shape[0]][(int)shape[1]]); for (int i = 0; i < actual.length; ++i) { @@ -80,7 +80,7 @@ public void createDoubleZeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros op = Zeros.create(scope, Constant.create(scope, shape), TFloat64.DTYPE); + Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TFloat64.DTYPE); try (Tensor result = sess.runner().fetch(op.asOutput()).run().get(0)) { double[][] actual = result.expect(TFloat64.DTYPE).copyTo(new double[(int)shape[0]][(int)shape[1]]); for (int i = 0; i < actual.length; ++i) { @@ -98,7 +98,7 @@ public void createLongZeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros op = Zeros.create(scope, Constant.create(scope, shape), TInt64.DTYPE); + Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TInt64.DTYPE); try (Tensor result = sess.runner().fetch(op.asOutput()).run().get(0)) { long[][] actual = result.expect(TInt64.DTYPE).copyTo(new long[(int)shape[0]][(int)shape[1]]); for (int i = 0; i < actual.length; ++i) { @@ -116,7 +116,7 @@ public void createBooleanZeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = 
{2, 2}; - Zeros op = Zeros.create(scope, Constant.create(scope, shape), TBool.DTYPE); + Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TBool.DTYPE); try (Tensor result = sess.runner().fetch(op.asOutput()).run().get(0)) { boolean[][] actual = result.expect(TBool.DTYPE).copyTo(new boolean[(int)shape[0]][(int)shape[1]]); for (int i = 0; i < actual.length; ++i) { @@ -134,7 +134,7 @@ public void createUint8Zeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros op = Zeros.create(scope, Constant.create(scope, shape), TUint8.DTYPE); + Zeros op = Zeros.create(scope, Constant.vectorOf(scope, shape), TUint8.DTYPE); try (Tensor result = sess.runner().fetch(op.asOutput()).run().get(0)) { byte[][] actual = result.expect(TUint8.DTYPE).copyTo(new byte[(int)shape[0]][(int)shape[1]]); result.copyTo(actual); @@ -153,7 +153,7 @@ public void cannotCreateStringZeros() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros.create(scope, Constant.create(scope, shape), TString.DTYPE); + Zeros.create(scope, Constant.vectorOf(scope, shape), TString.DTYPE); } } @@ -163,7 +163,7 @@ public void operationsComposingZerosAreCorrectlyNamed() { Session sess = new Session(g)) { Scope scope = new Scope(g); long[] shape = {2, 2}; - Zeros zeros = Zeros.create(scope.withSubScope("test"), Constant.create(scope, shape), TFloat32.DTYPE); + Zeros zeros = Zeros.create(scope.withSubScope("test"), Constant.vectorOf(scope, shape), TFloat32.DTYPE); List> results = sess.runner().addTarget("test/Zeros/Zero").addTarget("test/Zeros/Fill").run(); } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/NumericTypesTestBase.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/NumericTypesTestBase.java index 7d2554602d6..55c2ee3b37e 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/NumericTypesTestBase.java +++ 
b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/NumericTypesTestBase.java @@ -34,12 +34,10 @@ abstract class NumericTypesTestBase, U> { - private static final float EPSILON_F = 1e-7f; - @Test public void initializeTensorsWithZeros() { // Allocate a tensor of 32-bits integer of the shape (2, 3, 2) - Tensor tensor = allocateTensor(Shape.make(2, 3, 2)); + Tensor tensor = allocateTensor(Shape.of(2, 3, 2)); NdArray tensorData = tensor.data(); assertEquals(3, tensorData.rank()); @@ -50,11 +48,11 @@ public void initializeTensorsWithZeros() { // Initialize tensor memory with zeros and take a snapshot tensorData.scalars().forEach(scalar -> scalar.setObject(valueOf(0))); - Constant x = tf.constant(tensor); + Constant x = tf.val(tensor); // Initialize the same tensor memory with ones and take a snapshot tensorData.scalars().forEach(scalar -> scalar.setObject(valueOf(1))); - Constant y = tf.constant(tensor); + Constant y = tf.val(tensor); // Subtract y from x and validate the result Sub sub = tf.math.sub(x, y); @@ -69,7 +67,7 @@ public void genericTest() { IntNdArray heapData = NdArrays.vectorOf(0, 1, 2, 3); // Creates a 2x2 matrix - try (Tensor tensor = TInt32.ofShape(2, 2)) { + try (Tensor tensor = TInt32.tensorOf(Shape.of(2, 2))) { IntNdArray tensorData = tensor.data(); // Copy first 2 values of the vector to the first row of the matrix @@ -95,7 +93,7 @@ public void genericTest() { Ops tf = Ops.create(session); // Compute the power of the tensor by itself - Constant x = tf.constant(tensor); + Constant x = tf.val(tensor); IntNdArray result = tf.math.pow(x, x).data(); // Validate result by computing the same operation in Java diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TBfloat16Test.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TBfloat16Test.java index 89f8d03524f..62e7b93f321 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TBfloat16Test.java 
+++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TBfloat16Test.java @@ -24,7 +24,7 @@ public class TBfloat16Test extends NumericTypesTestBase { @Override Tensor allocateTensor(Shape shape) { - return TBfloat16.ofShape(shape); + return TBfloat16.tensorOf(shape); } @Override diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TFloat16Test.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TFloat16Test.java index bbd341d50fd..88ed5195abd 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TFloat16Test.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TFloat16Test.java @@ -24,7 +24,7 @@ public class TFloat16Test extends NumericTypesTestBase { @Override Tensor allocateTensor(Shape shape) { - return TFloat16.ofShape(shape); + return TFloat16.tensorOf(shape); } @Override diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TFloat32Test.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TFloat32Test.java index 4f9b63afdc0..ea00c121054 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TFloat32Test.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TFloat32Test.java @@ -24,7 +24,7 @@ public class TFloat32Test extends NumericTypesTestBase { @Override Tensor allocateTensor(Shape shape) { - return TFloat32.ofShape(shape); + return TFloat32.tensorOf(shape); } @Override diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TFloat64Test.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TFloat64Test.java index 906a985001a..d079028b719 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TFloat64Test.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TFloat64Test.java @@ -24,7 +24,7 @@ public 
class TFloat64Test extends NumericTypesTestBase { @Override Tensor allocateTensor(Shape shape) { - return TFloat64.ofShape(shape); + return TFloat64.tensorOf(shape); } @Override diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TInt32Test.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TInt32Test.java index 4327e849d4d..28dda560903 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TInt32Test.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TInt32Test.java @@ -24,7 +24,7 @@ public class TInt32Test extends NumericTypesTestBase { @Override Tensor allocateTensor(Shape shape) { - return TInt32.ofShape(shape); + return TInt32.tensorOf(shape); } @Override diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TInt64Test.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TInt64Test.java index b92982ad2f3..4b2ca447bb0 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TInt64Test.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TInt64Test.java @@ -24,7 +24,7 @@ public class TInt64Test extends NumericTypesTestBase { @Override Tensor allocateTensor(Shape shape) { - return TInt64.ofShape(shape); + return TInt64.tensorOf(shape); } @Override diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TStringTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TStringTest.java index f2a13f214bd..c40caf35595 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TStringTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TStringTest.java @@ -48,20 +48,20 @@ public void createVector() { TString data = tensor.data(); assertNotNull(data); - assertEquals(Shape.make(2), data.shape()); + assertEquals(Shape.of(2), 
data.shape()); assertEquals("Pretty", data.getObject(0)); assertEquals("vacant", data.getObject(1)); } @Test public void createCopy() { - NdArray strings = NdArrays.ofObjects(String.class, Shape.make(2, 2)) + NdArray strings = NdArrays.ofObjects(String.class, Shape.of(2, 2)) .setObject("Pretty", 0, 0) .setObject("vacant", 0, 1) .setObject("New", 1, 0) .setObject("York", 1, 1); - Tensor tensor = TString.copyOf(strings); + Tensor tensor = TString.tensorOf(strings); assertNotNull(tensor); TString data = tensor.data(); @@ -73,7 +73,7 @@ public void createCopy() { @Test public void defaultCharsetIsUtf8() { - Tensor tensor = TString.copyOf(NdArrays.scalarOfObject(BABY_CHICK)); + Tensor tensor = TString.tensorOf(NdArrays.scalarOfObject(BABY_CHICK)); byte[] bytes = tensor.data().asBytes().getObject(); assertArrayEquals(new byte[] { (byte)0xF0, (byte)0x9F, (byte)0x90, (byte)0xA5 }, bytes); assertEquals(BABY_CHICK, tensor.data().getObject()); @@ -81,20 +81,20 @@ public void defaultCharsetIsUtf8() { @Test public void usingDifferentCharset() { - Tensor tensor = TString.copyOf(StandardCharsets.UTF_16LE, NdArrays.scalarOfObject(BABY_CHICK)); + Tensor tensor = TString.tensorOf(StandardCharsets.UTF_16LE, NdArrays.scalarOfObject(BABY_CHICK)); byte[] bytes = tensor.data().asBytes().getObject(); assertArrayEquals(new byte[] { (byte)0x3D, (byte)0xD8, (byte)0x25, (byte)0xDC }, bytes); - assertEquals(BABY_CHICK, tensor.data().use(StandardCharsets.UTF_16LE).getObject()); + assertEquals(BABY_CHICK, tensor.data().using(StandardCharsets.UTF_16LE).getObject()); } @Test public void initializingTensorWithRawBytes() { String[] strings = new String[] { "TensorFlow", "For", "Java", "Rocks", "!" 
}; - NdArray bytes = NdArrays.ofObjects(byte[].class, Shape.make(strings.length)); + NdArray bytes = NdArrays.ofObjects(byte[].class, Shape.of(strings.length)); for (int i = 0; i < strings.length; ++i) { bytes.setObject(strings[i].getBytes(), i); } - Tensor tensor = TString.copyOfBytes(bytes); + Tensor tensor = TString.tensorOfBytes(bytes); assertNotNull(tensor); assertEquals(bytes.shape(), tensor.shape()); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TUint8Test.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TUint8Test.java index 95c27a7e881..5754ec0de87 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TUint8Test.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/types/TUint8Test.java @@ -24,7 +24,7 @@ public class TUint8Test extends NumericTypesTestBase { @Override Tensor allocateTensor(Shape shape) { - return TUint8.ofShape(shape); + return TUint8.tensorOf(shape); } @Override diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java index 74557e4bca9..4d71b7ed978 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java @@ -193,7 +193,7 @@ private static class OpsSpec { } private static final Pattern JAVADOC_TAG_PATTERN = - Pattern.compile("@(?:param|return|throws|exception|see)\\s+.*"); + Pattern.compile("@(?:param|return|throws|exception|see|deprecated)\\s+.*"); private static final TypeName T_OP = ClassName.get("org.tensorflow.op", "Op"); private static final ClassName T_OPS = ClassName.get("org.tensorflow.op", "Ops"); private static final TypeName T_OPERATOR = @@ 
-280,17 +280,14 @@ private boolean collectOpsMethods( result = false; continue; } - TypeElement opClass = (TypeElement) e; - // Skip deprecated operations for now, as we do not guarantee API stability yet - if (opClass.getAnnotation(Deprecated.class) == null) { - collectOpMethods(groupedMethods, opClass, annotation); - } + collectOpMethods(groupedMethods, (TypeElement)e, annotation); } return result; } private void collectOpMethods( Multimap groupedMethods, TypeElement opClass, TypeElement annotation) { + boolean opClassDeprecated = opClass.getAnnotation(Deprecated.class) != null; AnnotationMirror operatorAnnot = getAnnotationMirror(opClass, annotation.getQualifiedName()); if (operatorAnnot == null) { throw new IllegalArgumentException( @@ -331,7 +328,8 @@ private void collectOpMethods( } boolean describeByClass = getAnnotationElementValueAsBoolean("describeByClass", endpointAnnot, false); - MethodSpec method = buildOpMethod(endpointName, opClass, opMethod, describeByClass); + boolean deprecated = opMethod.getAnnotation(Deprecated.class) != null || opClassDeprecated; + MethodSpec method = buildOpMethod(endpointName, opClass, opMethod, describeByClass, deprecated); groupedMethods.put(endpointGroup, method); } } @@ -339,7 +337,7 @@ private void collectOpMethods( private MethodSpec buildOpMethod( String methodName, TypeElement opClass, ExecutableElement endpointMethod, - boolean describeByClass) { + boolean describeByClass, boolean deprecated) { MethodSpec.Builder builder = MethodSpec.methodBuilder(methodName) .addModifiers(Modifier.PUBLIC) @@ -347,6 +345,9 @@ private MethodSpec buildOpMethod( .varargs(endpointMethod.isVarArgs()) .addJavadoc("$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass)); + if (deprecated) { + builder.addAnnotation(Deprecated.class); + } for (TypeParameterElement tp : endpointMethod.getTypeParameters()) { TypeVariableName tvn = TypeVariableName.get((TypeVariable) tp.asType()); builder.addTypeVariable(tvn); @@ -470,27 +471,27 @@ 
private static TypeSpec buildTopClass(OpsSpec spec) { + "{@link $T @Operator} is exposed\n" + "by this API or one of its subgroup.\n

          Example usage:\n

          {@code\n"
                               + "try (Graph g = new Graph()) {\n"
          -                    + "  Ops ops = Ops.create(g);\n"
          +                    + "  Ops tf = Ops.create(g);\n"
                               + "  // Operations are typed classes with convenience\n"
                               + "  // builders in Ops.\n"
          -                    + "  Constant three = ops.constant(3);\n"
          +                    + "  Constant three = tf.val(3);\n"
                               + "  // Single-result operations implement the Operand\n"
                               + "  // interface, so this works too.\n"
          -                    + "  Operand four = ops.constant(4);\n"
          +                    + "  Operand four = tf.val(4);\n"
                               + "  // Most builders are found within a group, and accept\n"
                               + "  // Operand types as operands\n"
          -                    + "  Operand nine = ops.math.add(four, ops.constant(5));\n"
          +                    + "  Operand nine = tf.math.add(four, tf.val(5));\n"
                               + "  // Multi-result operations however offer methods to\n"
                               + "  // select a particular result for use.\n"
          -                    + "  Operand result = \n"
          -                    + "      ops.math.add(ops.unique(s, a).y(), b);\n"
          +                    + "  Operand result = \n"
          +                    + "      tf.math.add(tf.unique(s, a).y(), b);\n"
                               + "  // Optional attributes\n"
          -                    + "  ops.linalg.matMul(a, b, MatMul.transposeA(true));\n"
          +                    + "  tf.linalg.matMul(a, b, MatMul.transposeA(true));\n"
                               + "  // Naming operators\n"
          -                    + "  ops.withName(\"foo\").constant(5); // name \"foo\"\n"
          +                    + "  tf.withName(\"foo\").val(5); // name \"foo\"\n"
                               + "  // Names can exist in a hierarchy\n"
          -                    + "  Ops sub = ops.withSubScope(\"sub\");\n"
          -                    + "  sub.withName(\"bar\").constant(4); // \"sub/bar\"\n"
          +                    + "  Ops sub = tf.withSubScope(\"sub\");\n"
          +                    + "  sub.withName(\"bar\").val(4); // \"sub/bar\"\n"
                               + "}\n"
                               + "}
          \n", T_OP, diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/Shape.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/Shape.java index 201d052bf23..3ecc1f131f0 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/Shape.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/Shape.java @@ -42,19 +42,19 @@ public static Shape scalar() { * *
          {@code
              * // A 2-element vector.
          -   * Shape vector = Shape.create(2);
          +   * Shape vector = Shape.of(2);
              *
              * // A 2x3 matrix.
          -   * Shape matrix = Shape.create(2, 3);
          +   * Shape matrix = Shape.of(2, 3);
              *
              * // A matrix with 4 columns but an unknown number of rows.
              * // This is typically used to indicate the shape of tensors that represent
              * // a variable-sized batch of values. The Shape below might represent a
              * // variable-sized batch of 4-element vectors.
          -   * Shape batch = Shape.create(-1, 4);
          +   * Shape batch = Shape.of(-1, 4);
              * }
          */ - public static Shape make(long... dimensionSizes) { + public static Shape of(long... dimensionSizes) { if (dimensionSizes == null || dimensionSizes.length == 0) { return scalar(); } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/StaticApi.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/StaticApi.java index 836edd95285..d6b7d8f3bbc 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/StaticApi.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/StaticApi.java @@ -329,105 +329,105 @@ static BooleanNdArray ndArrayOfBooleans(Shape shape) { /** * Wraps a buffer in an N-dimensional array of a given shape. * - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - static NdArray ndArrayOf(DataBuffer buffer, Shape shape) { - return NdArrays.wrap(buffer, shape); + static NdArray ndArrayOf(Shape shape, DataBuffer buffer) { + return NdArrays.wrap(shape, buffer); } /** * Wraps a byte buffer in an N-dimensional array of a given shape. * - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new byte N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - static ByteNdArray ndArrayOf(ByteDataBuffer buffer, Shape shape) { + static ByteNdArray ndArrayOf(Shape shape, ByteDataBuffer buffer) { return NdArrays.wrap(buffer, shape); } /** * Wraps a short buffer in an N-dimensional array of a given shape. 
* - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new short N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - static ShortNdArray ndArrayOf(ShortDataBuffer buffer, Shape shape) { - return NdArrays.wrap(buffer, shape); + static ShortNdArray ndArrayOf(Shape shape, ShortDataBuffer buffer) { + return NdArrays.wrap(shape, buffer); } /** * Wraps an int buffer in an N-dimensional array of a given shape. * - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new int N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - static IntNdArray ndArrayOf(IntDataBuffer buffer, Shape shape) { - return NdArrays.wrap(buffer, shape); + static IntNdArray ndArrayOf(Shape shape, IntDataBuffer buffer) { + return NdArrays.wrap(shape, buffer); } /** * Wraps a long buffer in an N-dimensional array of a given shape. * - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new long N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - static LongNdArray ndArrayOf(LongDataBuffer buffer, Shape shape) { + static LongNdArray ndArrayOf(Shape shape, LongDataBuffer buffer) { return NdArrays.wrap(buffer, shape); } /** * Wraps a float buffer in an N-dimensional array of a given shape. 
* - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new float N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - static FloatNdArray ndArrayOf(FloatDataBuffer buffer, Shape shape) { - return NdArrays.wrap(buffer, shape); + static FloatNdArray ndArrayOf(Shape shape, FloatDataBuffer buffer) { + return NdArrays.wrap(shape, buffer); } /** * Wraps a double buffer in an N-dimensional array of a given shape. * - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new double N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - static DoubleNdArray ndArrayOf(DoubleDataBuffer buffer, Shape shape) { - return NdArrays.wrap(buffer, shape); + static DoubleNdArray ndArrayOf(Shape shape, DoubleDataBuffer buffer) { + return NdArrays.wrap(shape, buffer); } /** * Wraps a boolean buffer in an N-dimensional array of a given shape. * - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new boolean N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - static BooleanNdArray ndArrayOf(BooleanDataBuffer buffer, Shape shape) { - return NdArrays.wrap(buffer, shape); + static BooleanNdArray ndArrayOf(Shape shape, BooleanDataBuffer buffer) { + return NdArrays.wrap(shape, buffer); } /** @@ -517,6 +517,7 @@ static BooleanNdArray scalarOf(boolean value) { * @return new vector * @throws IllegalArgumentException if values is null */ + @SafeVarargs static NdArray vectorOfObjects(T... values) { return NdArrays.vectorOfObjects(values); } @@ -602,10 +603,10 @@ static BooleanNdArray vectorOf(boolean... values) { * Create a Shape representing an N-dimensional value. 
* * @param dimensionSize size of each dimension in the shape, {@link Shape#UNKNOWN_SIZE} if unknown - * @see Shape#make(long...) + * @see Shape#of(long...) */ static Shape shapeOf(long... dimensionSize) { - return Shape.make(dimensionSize); + return Shape.of(dimensionSize); } /** diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/buffer/DataBuffer.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/buffer/DataBuffer.java index 5e2b89bae7b..e7030c9ff58 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/buffer/DataBuffer.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/buffer/DataBuffer.java @@ -17,6 +17,8 @@ package org.tensorflow.tools.buffer; +import java.nio.BufferOverflowException; +import java.nio.BufferUnderflowException; import java.nio.ReadOnlyBufferException; /** @@ -86,7 +88,94 @@ public interface DataBuffer { DataBuffer setObject(T value, long index); /** - * Copy data of this buffer in the given buffer. + * Read the references of the objects in this buffer into the destination array. + *

          + * This method transfers values from this buffer into the given destination array. If there are + * fewer values in the buffer than are required to satisfy the request, that is, if + * {@code dst.length > size()}, then no values are transferred and a + * BufferUnderflowException is thrown. + *

          + * Otherwise, this method copies {@code n = dst.length} values from this buffer into the given + * array. + * + * @param dst the array into which values are to be written + * @return this buffer + * @throws BufferUnderflowException if there are not enough values to copy from this buffer + */ + default DataBuffer read(T[] dst) { + return read(dst, 0, dst.length); + } + + /** + * Read the references of the objects in this buffer into the destination array. + *

          + * This method transfers values from this buffer into the given destination array. If there are + * fewer values in the buffer than are required to satisfy the request, that is, if + * {@code length > size()}, then no values are transferred and a + * BufferUnderflowException is thrown. + *

          + * Otherwise, this method copies {@code n = length} values from this buffer into the given array + * starting at the given offset. + * + * @param dst the array into which values are to be written + * @param offset the offset within the array of the first value to be written; must be + * non-negative and no larger than {@code dst.length} + * @param length the maximum number of values to be written to the given array; must be + * non-negative and no larger than {@code dst.length - offset} + * @return this buffer + * @throws BufferUnderflowException if there are fewer than length values remaining in this buffer + * @throws IndexOutOfBoundsException if the preconditions on the offset and length parameters do + * not hold + */ + DataBuffer read(T[] dst, int offset, int length); + + /** + * Write the references of the objects in the source array into this buffer. + *

          + * This method transfers the values in the given source array into this buffer. If there are + * more values in the source array than in this buffer, that is, if + * {@code src.length > size()}, then no values are transferred and a + * BufferOverflowException is thrown. + *

          + * Otherwise, this method copies {@code n = src.length} values from the given array. + * + * @param src the source array from which values are to be read + * @return this buffer + * @throws BufferOverflowException if there is insufficient space in this buffer for the values in + * the source array + * @throws ReadOnlyBufferException if this buffer is read-only + */ + default DataBuffer write(T[] src) { + return write(src, 0, src.length); + } + + /** + * Bulk put method, using int arrays. + *

          + * This method transfers the values in the given source array into this buffer. If there are + * more values in the source array than in this buffer, that is, if + * {@code length > size()}, then no values are transferred and a + * BufferOverflowException is thrown. + *

          + * Otherwise, this method copies {@code n = length} values from the given array into this buffer, + * starting at the given offset. + * + * @param src the source array from which values are to be read + * @param offset the offset within the array of the first value to be read; must be non-negative + * and no larger than {@code src.length} + * @param length the number of values to be read from the given array; must be non-negative and no + * larger than {@code src.length - offset} + * @return this buffer + * @throws BufferOverflowException if there is insufficient space in this buffer for the values in + * the source array + * @throws IndexOutOfBoundsException if the preconditions on the offset and length parameters do + * not hold + * @throws ReadOnlyBufferException if this buffer is read-only + */ + DataBuffer write(T[] src, int offset, int length); + + /** + * Write the references of the objects in the source array into this buffer. *

          * If there are more values to copy than the destination buffer size, i.e. * {@code size > dst.size()}, then no values are transferred and a diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/buffer/DataBuffers.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/buffer/DataBuffers.java index 0e04cc8080e..752ed22598c 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/buffer/DataBuffers.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/buffer/DataBuffers.java @@ -148,6 +148,111 @@ public static DataBuffer ofObjects(Class type, long size) { return MiscDataBufferFactory.create(array, false); } + /** + * Create a buffer from an array of floats into a data buffer. + * + *

          The returned buffer allows read and write operations and shares the memory of the source + * array, which is equivalent to calling {@link #from(float[], boolean, boolean) from(values, false, false)} + * + * @param values float values + * @return a new buffer + */ + public static FloatDataBuffer of(float... values) { + return from(values, false, false); + } + + /** + * Create a buffer from an array of bytes into a data buffer. + * + *

          The returned buffer allows read and write operations and shares the memory of the source + * array, which is equivalent to calling {@link #from(byte[], boolean, boolean) from(values, false, false)} + * + * @param values byte values + * @return a new buffer + */ + public static ByteDataBuffer of(byte... values) { + return from(values, false, false); + } + + /** + * Create a buffer from an array of longs into a data buffer. + * + *

          The returned buffer allows read and write operations and shares the memory of the source + * array, which is equivalent to calling {@link #from(long[], boolean, boolean) from(values, false, false)} + * + * @param values long values + * @return a new buffer + */ + public static LongDataBuffer of(long... values) { + return from(values, false, false); + } + + /** + * Create a buffer from an array of ints into a data buffer. + * + *

          The returned buffer allows read and write operations and shares the memory of the source + * array, which is equivalent to calling {@link #from(int[], boolean, boolean) from(values, false, false)} + * + * @param values int values + * @return a new buffer + */ + public static IntDataBuffer of(int... values) { + return from(values, false, false); + } + + /** + * Create a buffer from an array of shorts into a data buffer. + * + *

          The returned buffer allows read and write operations and shares the memory of the source + * array, which is equivalent to calling {@link #from(short[], boolean, boolean) from(values, false, false)} + * + * @param values short values + * @return a new buffer + */ + public static ShortDataBuffer of(short... values) { + return from(values, false, false); + } + + /** + * Create a buffer from an array of doubles into a data buffer. + * + *

          The returned buffer allows read and write operations and shares the memory of the source + * array, which is equivalent to calling {@link #from(double[], boolean, boolean) from(values, false, false)} + * + * @param values double values + * @return a new buffer + */ + public static DoubleDataBuffer of(double... values) { + return from(values, false, false); + } + + /** + * Create a buffer from an array of booleans into a data buffer. + * + *

          The returned buffer allows read and write operations and shares the memory of the source + * array, which is equivalent to calling {@link #from(boolean[], boolean, boolean) from(values, false, false)} + * + * @param values boolean values + * @return a new buffer + */ + public static BooleanDataBuffer of(boolean... values) { + return from(values, false, false); + } + + /** + * Create a buffer from an array of objects into a data buffer. + * + *

          The returned buffer allows read and write operations and share the memory of the source + * array, which is equivalent to call {@link #from(T[], boolean, boolean)} from(values, false, false}} + * + * @param values objects values + * @return a new buffer + */ + @SafeVarargs + public static DataBuffer ofObjects(T... values) { + return from(values, false, false); + } + /** * Create a buffer from an array of floats into a data buffer. * diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/buffer/impl/AbstractDataBuffer.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/buffer/impl/AbstractDataBuffer.java index c3be19f2bfe..871cd644c1a 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/buffer/impl/AbstractDataBuffer.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/buffer/impl/AbstractDataBuffer.java @@ -20,6 +20,24 @@ public abstract class AbstractDataBuffer implements DataBuffer { + @Override + public DataBuffer read(T[] dst, int offset, int length) { + Validator.readArgs(this, dst.length, offset, length); + for (int i = 0; i < length; ++i) { + dst[i + offset] = getObject(i); + } + return this; + } + + @Override + public DataBuffer write(T[] src, int offset, int length) { + Validator.writeArgs(this, src.length, offset, length); + for (int i = 0; i < length; ++i) { + setObject(src[i + offset], i); + } + return this; + } + protected void slowCopyTo(DataBuffer dst, long size) { for (long idx = 0; idx < size; ++idx) { dst.setObject(getObject(idx), idx); diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/BooleanNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/BooleanNdArray.java index bba08ffdeb5..a16ff9e3abc 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/BooleanNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/BooleanNdArray.java @@ -67,66 +67,6 @@ public interface BooleanNdArray extends NdArray { */ BooleanNdArray 
setBoolean(boolean value, long... coordinates); - /** - * Reads the content of this N-dimensional array into the destination boolean array. - * - *

          The size of the destination array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - */ - default BooleanNdArray read(boolean[] dst) { - return read(dst, 0); - } - - /** - * Reads the content of this N-dimensional array into the destination boolean array. - * - *

          {@code dst.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @param offset the index of the first boolean to write in the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - * @throws IndexOutOfBoundsException if offset is greater than dst length or is negative - */ - BooleanNdArray read(boolean[] dst, int offset); - - /** - * Writes the content of this N-dimensional array from the source boolean array. - * - *

          The size of the source array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - */ - default BooleanNdArray write(boolean[] src) { - return write(src, 0); - } - - /** - * Writes the content of this N-dimensional array from the source boolean array. - * - *

          {@code src.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @param offset the index of the first boolean to read from the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - * @throws IndexOutOfBoundsException if offset is greater than src length or is negative - */ - BooleanNdArray write(boolean[] src, int offset); - @Override BooleanNdArray slice(Index... indices); @@ -164,16 +104,4 @@ default BooleanNdArray setObject(Boolean value, long... coordinates) { BooleanNdArray write(DataBuffer src); BooleanNdArray write(BooleanDataBuffer src); - - @Override - BooleanNdArray read(Boolean[] dst); - - @Override - BooleanNdArray read(Boolean[] dst, int offset); - - @Override - BooleanNdArray write(Boolean[] src); - - @Override - BooleanNdArray write(Boolean[] src, int offset); } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/ByteNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/ByteNdArray.java index 4d9e35bfaef..59ef8c05da6 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/ByteNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/ByteNdArray.java @@ -67,66 +67,6 @@ public interface ByteNdArray extends NdArray { */ ByteNdArray setByte(byte value, long... coordinates); - /** - * Reads the content of this N-dimensional array into the destination byte array. - * - *

          The size of the destination array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - */ - default ByteNdArray read(byte[] dst) { - return read(dst, 0); - } - - /** - * Reads the content of this N-dimensional array into the destination byte array. - * - *

          {@code dst.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @param offset the index of the first byte to write in the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - * @throws IndexOutOfBoundsException if offset is greater than dst length or is negative - */ - ByteNdArray read(byte[] dst, int offset); - - /** - * Writes the content of this N-dimensional array from the source byte array. - * - *

          The size of the source array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - */ - default ByteNdArray write(byte[] src) { - return write(src, 0); - } - - /** - * Writes the content of this N-dimensional array from the source byte array. - * - *

          {@code src.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @param offset the index of the first byte to read from the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - * @throws IndexOutOfBoundsException if offset is greater than src length or is negative - */ - ByteNdArray write(byte[] src, int offset); - @Override ByteNdArray slice(Index... indices); @@ -164,16 +104,4 @@ default ByteNdArray setObject(Byte value, long... coordinates) { ByteNdArray write(DataBuffer src); ByteNdArray write(ByteDataBuffer src); - - @Override - ByteNdArray read(Byte[] dst); - - @Override - ByteNdArray read(Byte[] dst, int offset); - - @Override - ByteNdArray write(Byte[] src); - - @Override - ByteNdArray write(Byte[] src, int offset); } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/DoubleNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/DoubleNdArray.java index 772440fe8cf..dd23904b570 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/DoubleNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/DoubleNdArray.java @@ -67,66 +67,6 @@ public interface DoubleNdArray extends NdArray { */ DoubleNdArray setDouble(double value, long... coordinates); - /** - * Reads the content of this N-dimensional array into the destination double array. - * - *

          The size of the destination array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - */ - default DoubleNdArray read(double[] dst) { - return read(dst, 0); - } - - /** - * Reads the content of this N-dimensional array into the destination double array. - * - *

          {@code dst.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @param offset the index of the first double to write in the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - * @throws IndexOutOfBoundsException if offset is greater than dst length or is negative - */ - DoubleNdArray read(double[] dst, int offset); - - /** - * Writes the content of this N-dimensional array from the source double array. - * - *

          The size of the source array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - */ - default DoubleNdArray write(double[] src) { - return write(src, 0); - } - - /** - * Writes the content of this N-dimensional array from the source double array. - * - *

          {@code src.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @param offset the index of the first double to read from the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - * @throws IndexOutOfBoundsException if offset is greater than src length or is negative - */ - DoubleNdArray write(double[] src, int offset); - @Override DoubleNdArray slice(Index... indices); @@ -164,16 +104,4 @@ default DoubleNdArray setObject(Double value, long... coordinates) { DoubleNdArray write(DataBuffer src); DoubleNdArray write(DoubleDataBuffer src); - - @Override - DoubleNdArray read(Double[] dst); - - @Override - DoubleNdArray read(Double[] dst, int offset); - - @Override - DoubleNdArray write(Double[] src); - - @Override - DoubleNdArray write(Double[] src, int offset); } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/FloatNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/FloatNdArray.java index 8c5dc2d4164..59876c22ca7 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/FloatNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/FloatNdArray.java @@ -67,66 +67,6 @@ public interface FloatNdArray extends NdArray { */ FloatNdArray setFloat(float value, long... coordinates); - /** - * Reads the content of this N-dimensional array into the destination float array. - * - *

          The size of the destination array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - */ - default FloatNdArray read(float[] dst) { - return read(dst, 0); - } - - /** - * Reads the content of this N-dimensional array into the destination float array. - * - *

          {@code dst.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @param offset the index of the first float to write in the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - * @throws IndexOutOfBoundsException if offset is greater than dst length or is negative - */ - FloatNdArray read(float[] dst, int offset); - - /** - * Writes the content of this N-dimensional array from the source float array. - * - *

          The size of the source array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - */ - default FloatNdArray write(float[] src) { - return write(src, 0); - } - - /** - * Writes the content of this N-dimensional array from the source float array. - * - *

          {@code src.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @param offset the index of the first float to read from the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - * @throws IndexOutOfBoundsException if offset is greater than src length or is negative - */ - FloatNdArray write(float[] src, int offset); - @Override FloatNdArray slice(Index... coordinates); @@ -164,16 +104,4 @@ default FloatNdArray setObject(Float value, long... coordinates) { FloatNdArray write(DataBuffer src); FloatNdArray write(FloatDataBuffer src); - - @Override - FloatNdArray read(Float[] dst); - - @Override - FloatNdArray read(Float[] dst, int offset); - - @Override - FloatNdArray write(Float[] src); - - @Override - FloatNdArray write(Float[] src, int offset); } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/IllegalRankException.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/IllegalRankException.java index 21c9b43c2c4..4ec44150caa 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/IllegalRankException.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/IllegalRankException.java @@ -16,6 +16,9 @@ */ package org.tensorflow.tools.ndarray; +/** + * Exception thrown when an operation cannot be completed because of the rank of the targeted array. 
+ */ public class IllegalRankException extends IllegalArgumentException { public IllegalRankException(String message) { diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/IntNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/IntNdArray.java index 7158996664d..475dbf6fb75 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/IntNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/IntNdArray.java @@ -67,66 +67,6 @@ public interface IntNdArray extends NdArray { */ IntNdArray setInt(int value, long... coordinates); - /** - * Reads the content of this N-dimensional array into the destination int array. - * - *

          The size of the destination array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - */ - default IntNdArray read(int[] dst) { - return read(dst, 0); - } - - /** - * Reads the content of this N-dimensional array into the destination int array. - * - *

          {@code dst.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @param offset the index of the first integer to write in the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - * @throws IndexOutOfBoundsException if offset is greater than dst length or is negative - */ - IntNdArray read(int[] dst, int offset); - - /** - * Writes the content of this N-dimensional array from the source int array. - * - *

          The size of the source array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - */ - default IntNdArray write(int[] src) { - return write(src, 0); - } - - /** - * Writes the content of this N-dimensional array from the source int array. - * - *

          {@code src.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @param offset the index of the first integer to read from the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - * @throws IndexOutOfBoundsException if offset is greater than src length or is negative - */ - IntNdArray write(int[] src, int offset); - @Override IntNdArray slice(Index... indices); @@ -164,16 +104,4 @@ default IntNdArray setObject(Integer value, long... coordinates) { IntNdArray write(DataBuffer src); IntNdArray write(IntDataBuffer src); - - @Override - IntNdArray read(Integer[] dst); - - @Override - IntNdArray read(Integer[] dst, int offset); - - @Override - IntNdArray write(Integer[] src); - - @Override - IntNdArray write(Integer[] src, int offset); } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/LongNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/LongNdArray.java index bd5c7a17128..f545fc21550 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/LongNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/LongNdArray.java @@ -67,66 +67,6 @@ public interface LongNdArray extends NdArray { */ LongNdArray setLong(long value, long... coordinates); - /** - * Reads the content of this N-dimensional array into the destination long array. - * - *

          The size of the destination array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - */ - default LongNdArray read(long[] dst) { - return read(dst, 0); - } - - /** - * Reads the content of this N-dimensional array into the destination long array. - * - *

          {@code dst.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @param offset the index of the first long to write in the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - * @throws IndexOutOfBoundsException if offset is greater than dst length or is negative - */ - LongNdArray read(long[] dst, int offset); - - /** - * Writes the content of this N-dimensional array from the source long array. - * - *

          The size of the source array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - */ - default LongNdArray write(long[] src) { - return write(src, 0); - } - - /** - * Writes the content of this N-dimensional array from the source long array. - * - *

          {@code src.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @param offset the index of the first long to read from the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - * @throws IndexOutOfBoundsException if offset is greater than src length or is negative - */ - LongNdArray write(long[] src, int offset); - @Override LongNdArray slice(Index... indices); @@ -164,16 +104,4 @@ default LongNdArray setObject(Long value, long... coordinates) { LongNdArray write(DataBuffer src); LongNdArray write(LongDataBuffer src); - - @Override - LongNdArray read(Long[] dst); - - @Override - LongNdArray read(Long[] dst, int offset); - - @Override - LongNdArray write(Long[] src); - - @Override - LongNdArray write(Long[] src, int offset); } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/NdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/NdArray.java index 7c5664a56f8..63d6b0947fa 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/NdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/NdArray.java @@ -42,12 +42,12 @@ * FloatNdArray matrix3d = ndArrayOfFloats(shape(2, 3, 2)); * * // Initialize sub-matrices data with vectors - * matrix.set(vector(1.0f, 2.0f), 0, 0) - * .set(vector(3.0f, 4.0f), 0, 1) - * .set(vector(5.0f, 6.0f), 0, 2) - * .set(vector(7.0f, 8.0f), 1, 0) - * .set(vector(9.0f, 10.0f), 1, 1) - * .set(vector(11.0f, 12.0f), 1, 2); + * matrix.set(vectorOf(1.0f, 2.0f), 0, 0) + * .set(vectorOf(3.0f, 4.0f), 0, 1) + * .set(vectorOf(5.0f, 6.0f), 0, 2) + * .set(vectorOf(7.0f, 8.0f), 1, 0) + * .set(vectorOf(9.0f, 10.0f), 1, 1) + * .set(vectorOf(11.0f, 12.0f), 1, 2); * * // Access 
the second 3x2 matrix (of rank 2) * FloatNdArray matrix = matrix3d.get(1); @@ -293,64 +293,4 @@ default long size() { * @see DataBuffer#size() */ NdArray write(DataBuffer src); - - /** - * Reads the content of this N-dimensional array into the destination array. - * - *

          The size of the destination array must be equal or greater to the {@link #size()} of this - * array, or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of - * this array - */ - NdArray read(T[] dst); - - /** - * Reads the content of this N-dimensional array into the destination array. - * - *

          {@code dst.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @param offset the index of the first element to write in the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of - * this array - * @throws IllegalArgumentException if offset is greater than dst length or is negative - */ - NdArray read(T[] dst, int offset); - - /** - * Writes the content of this N-dimensional array from the source array. - * - *

          The size of the source array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size - * of this array - */ - NdArray write(T[] src); - - /** - * Writes the content of this N-dimensional array from the source array. - * - *

          {@code src.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @param offset the index of the first byte to read from the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size - * of this array - * @throws IllegalArgumentException if offset is greater than src length or is negative - */ - NdArray write(T[] src, int offset); } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/NdArrays.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/NdArrays.java index 7922c0d0cf7..fef35f923ff 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/NdArrays.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/NdArrays.java @@ -36,7 +36,7 @@ import org.tensorflow.tools.ndarray.impl.dense.ShortDenseNdArray; /** - * Helper class for instantiating {@link NdArray} objects. + * Utility class for instantiating {@link NdArray} objects. */ public final class NdArrays { @@ -55,6 +55,9 @@ public static ByteNdArray scalarOf(byte value) { /** * Creates a byte vector (rank 1) initialized with the given values. * + *

          Modifying the data of the returned vector will also impact the values in the array + * passed in parameter. + * * @param values vector values * @return new byte vector * @throws IllegalArgumentException if values is null @@ -63,7 +66,7 @@ public static ByteNdArray vectorOf(byte... values) { if (values == null) { throw new IllegalArgumentException("Values cannot be null"); } - return ofBytes(Shape.make(values.length)).write(values); + return wrap(DataBuffers.from(values, false, false), Shape.of(values.length)); } /** @@ -110,6 +113,9 @@ public static LongNdArray scalarOf(long value) { /** * Creates a long vector (rank 1) initialized with the given values. * + *

          Modifying the data of the returned vector will also impact the values in the array + * passed in parameter. + * * @param values vector values * @return new long vector * @throws IllegalArgumentException if values is null @@ -118,7 +124,7 @@ public static LongNdArray vectorOf(long... values) { if (values == null) { throw new IllegalArgumentException(); } - return ofLongs(Shape.make(values.length)).write(values); + return wrap(DataBuffers.from(values, false, false), Shape.of(values.length)); } /** @@ -162,6 +168,9 @@ public static IntNdArray scalarOf(int value) { /** * Creates a int vector (rank 1) initialized with the given values. * + *

          Modifying the data of the returned vector will also impact the values in the array + * passed in parameter. + * * @param values vector values * @return new int vector * @throws IllegalArgumentException if values is null @@ -170,7 +179,7 @@ public static IntNdArray vectorOf(int... values) { if (values == null) { throw new IllegalArgumentException(); } - return ofInts(Shape.make(values.length)).write(values); + return wrap(Shape.of(values.length), DataBuffers.from(values, false, false)); } /** @@ -183,19 +192,19 @@ public static IntNdArray vectorOf(int... values) { * @throws IllegalArgumentException if shape is null or has unknown dimensions */ public static IntNdArray ofInts(Shape shape) { - return wrap(DataBuffers.ofInts(shape.size()), shape); + return wrap(shape, DataBuffers.ofInts(shape.size())); } /** * Wraps a buffer in an int N-dimensional array of a given shape. * - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new int N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - public static IntNdArray wrap(IntDataBuffer buffer, Shape shape) { + public static IntNdArray wrap(Shape shape, IntDataBuffer buffer) { return IntDenseNdArray.create(buffer, shape); } @@ -214,6 +223,9 @@ public static ShortNdArray scalarOf(short value) { /** * Creates a short vector (rank 1) initialized with the given values. * + *

          Modifying the data of the returned vector will also impact the values in the array + * passed in parameter. + * * @param values vector values * @return new short vector * @throws IllegalArgumentException if values is null @@ -222,7 +234,7 @@ public static ShortNdArray vectorOf(short... values) { if (values == null) { throw new IllegalArgumentException(); } - return ofShorts(Shape.make(values.length)).write(values); + return wrap(Shape.of(values.length), DataBuffers.from(values, false, false)); } /** @@ -235,19 +247,19 @@ public static ShortNdArray vectorOf(short... values) { * @throws IllegalArgumentException if shape is null or has unknown dimensions */ public static ShortNdArray ofShorts(Shape shape) { - return wrap(DataBuffers.ofShorts(shape.size()), shape); + return wrap(shape, DataBuffers.ofShorts(shape.size())); } /** * Wraps a buffer in a short N-dimensional array of a given shape. * - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new short N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - public static ShortNdArray wrap(ShortDataBuffer buffer, Shape shape) { + public static ShortNdArray wrap(Shape shape, ShortDataBuffer buffer) { return ShortDenseNdArray.create(buffer, shape); } @@ -266,6 +278,9 @@ public static FloatNdArray scalarOf(float value) { /** * Creates a float vector (rank 1) initialized with the given values. * + *

          Modifying the data of the returned vector will also impact the values in the array + * passed in parameter. + * * @param values vector values * @return new float vector * @throws IllegalArgumentException if values is null @@ -274,7 +289,7 @@ public static FloatNdArray vectorOf(float... values) { if (values == null) { throw new IllegalArgumentException(); } - return ofFloats(Shape.make(values.length)).write(values); + return wrap(Shape.of(values.length), DataBuffers.from(values, false, false)); } /** @@ -287,19 +302,19 @@ public static FloatNdArray vectorOf(float... values) { * @throws IllegalArgumentException if shape is null or has unknown dimensions */ public static FloatNdArray ofFloats(Shape shape) { - return wrap(DataBuffers.ofFloats(shape.size()), shape); + return wrap(shape, DataBuffers.ofFloats(shape.size())); } /** * Wraps a buffer in a float N-dimensional array of a given shape. * - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new float N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - public static FloatNdArray wrap(FloatDataBuffer buffer, Shape shape) { + public static FloatNdArray wrap(Shape shape, FloatDataBuffer buffer) { return FloatDenseNdArray.create(buffer, shape); } @@ -318,6 +333,9 @@ public static DoubleNdArray scalarOf(double value) { /** * Creates a double vector (rank 1) initialized with the given values. * + *

          Modifying the data of the returned vector will also impact the values in the array + * passed in parameter. + * * @param values vector values * @return new double vector * @throws IllegalArgumentException if values is null @@ -326,7 +344,7 @@ public static DoubleNdArray vectorOf(double... values) { if (values == null) { throw new IllegalArgumentException(); } - return ofDoubles(Shape.make(values.length)).write(values); + return wrap(Shape.of(values.length), DataBuffers.from(values, false, false)); } /** @@ -339,19 +357,19 @@ public static DoubleNdArray vectorOf(double... values) { * @throws IllegalArgumentException if shape is null or has unknown dimensions */ public static DoubleNdArray ofDoubles(Shape shape) { - return wrap(DataBuffers.ofDoubles(shape.size()), shape); + return wrap(shape, DataBuffers.ofDoubles(shape.size())); } /** * Wraps a buffer in a double N-dimensional array of a given shape. * - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new double N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - public static DoubleNdArray wrap(DoubleDataBuffer buffer, Shape shape) { + public static DoubleNdArray wrap(Shape shape, DoubleDataBuffer buffer) { return DoubleDenseNdArray.create(buffer, shape); } @@ -370,6 +388,9 @@ public static BooleanNdArray scalarOf(boolean value) { /** * Creates a boolean vector (rank 1) initialized with the given values. * + *

          Modifying the data of the returned vector will also impact the values in the array + * passed in parameter. + * * @param values vector values * @return new boolean vector * @throws IllegalArgumentException if values is null @@ -378,7 +399,7 @@ public static BooleanNdArray vectorOf(boolean... values) { if (values == null) { throw new IllegalArgumentException(); } - return ofBooleans(Shape.make(values.length)).write(values); + return wrap(Shape.of(values.length), DataBuffers.from(values, false, false)); } /** @@ -391,19 +412,19 @@ public static BooleanNdArray vectorOf(boolean... values) { * @throws IllegalArgumentException if shape is null or has unknown dimensions */ public static BooleanNdArray ofBooleans(Shape shape) { - return wrap(DataBuffers.ofBooleans(shape.size()), shape); + return wrap(shape, DataBuffers.ofBooleans(shape.size())); } /** * Wraps a buffer in a boolean N-dimensional array of a given shape. * - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new boolean N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - public static BooleanNdArray wrap(BooleanDataBuffer buffer, Shape shape) { + public static BooleanNdArray wrap(Shape shape, BooleanDataBuffer buffer) { return BooleanDenseNdArray.create(buffer, shape); } @@ -426,16 +447,19 @@ public static NdArray scalarOfObject(T value) { /** * Creates a vector (rank 1) initialized with the given values. * + *

          Modifying the data of the returned vector will also impact the values in the array + * passed in parameter. + * * @param values vector values * @return new vector * @throws IllegalArgumentException if values is null */ - @SuppressWarnings("unchecked") + @SafeVarargs public static NdArray vectorOfObjects(T... values) { if (values == null) { throw new IllegalArgumentException(); } - return ofObjects((Class)values[0].getClass(), Shape.make(values.length)).write(values); + return wrap(Shape.of(values.length), DataBuffers.from(values, false, false)); } /** @@ -449,19 +473,19 @@ public static NdArray vectorOfObjects(T... values) { * @throws IllegalArgumentException if shape is null or has unknown dimensions */ public static NdArray ofObjects(Class clazz, Shape shape) { - return wrap(DataBuffers.ofObjects(clazz, shape.size()), shape); + return wrap(shape, DataBuffers.ofObjects(clazz, shape.size())); } /** * Wraps a buffer in an N-dimensional array of a given shape. * - * @param buffer buffer to wrap * @param shape shape of the array + * @param buffer buffer to wrap * @return new N-dimensional array * @throws IllegalArgumentException if shape is null, has unknown dimensions or has size bigger * in the buffer size */ - public static NdArray wrap(DataBuffer buffer, Shape shape) { + public static NdArray wrap(Shape shape, DataBuffer buffer) { return DenseNdArray.wrap(buffer, shape); } } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/ShortNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/ShortNdArray.java index 2a8b6c160aa..9835714686d 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/ShortNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/ShortNdArray.java @@ -67,66 +67,6 @@ public interface ShortNdArray extends NdArray { */ ShortNdArray setShort(short value, long... 
coordinates); - /** - * Reads the content of this N-dimensional array into the destination short array. - * - *

          The size of the destination array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - */ - default ShortNdArray read(short[] dst) { - return read(dst, 0); - } - - /** - * Reads the content of this N-dimensional array into the destination short array. - * - *

          {@code dst.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param dst the destination array - * @param offset the index of the first short to write in the destination array - * @return this array - * @throws java.nio.BufferOverflowException if the destination array cannot hold the content of this array - * @throws IndexOutOfBoundsException if offset is greater than dst length or is negative - */ - ShortNdArray read(short[] dst, int offset); - - /** - * Writes the content of this N-dimensional array from the source short array. - * - *

          The size of the source array must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - */ - default ShortNdArray write(short[] src) { - return write(src, 0); - } - - /** - * Writes the content of this N-dimensional array from the source short array. - * - *

          {@code src.length - offset} must be equal or greater to the {@link #size()} of this array, - * or an exception is thrown. After the copy, content of the both arrays can be altered - * independently, without affecting each other. - * - * @param src the source array - * @param offset the index of the first short to read from the source array - * @return this array - * @throws java.nio.BufferUnderflowException if the size of the source array is less than the size of this array - * @throws IndexOutOfBoundsException if offset is greater than src length or is negative - */ - ShortNdArray write(short[] src, int offset); - @Override ShortNdArray slice(Index... coordinates); @@ -164,16 +104,4 @@ default ShortNdArray setObject(Short value, long... coordinates) { ShortNdArray write(DataBuffer src); ShortNdArray write(ShortDataBuffer src); - - @Override - ShortNdArray read(Short[] dst); - - @Override - ShortNdArray read(Short[] dst, int offset); - - @Override - ShortNdArray write(Short[] src); - - @Override - ShortNdArray write(Short[] src, int offset); } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/StdArrays.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/StdArrays.java new file mode 100644 index 00000000000..f089b25f245 --- /dev/null +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/StdArrays.java @@ -0,0 +1,1476 @@ +package org.tensorflow.tools.ndarray; + +import static org.tensorflow.tools.ndarray.NdArrays.vectorOf; +import static org.tensorflow.tools.ndarray.NdArrays.vectorOfObjects; + +import org.tensorflow.tools.Shape; + +/** + * Utility class for working with {@link NdArray} instances mixed with standard Java arrays. 
+ */ +public final class StdArrays { + + /** + * Copy a single-dimension array of ints into the {@code dst} {@link NdArray} + * + * @param dst destination rank-1 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-1 or has an incompatible shape + * with the source array + */ + public static void copyTo(IntNdArray dst, int[] array) { + vectorOf(array).copyTo(dst); + } + + /** + * Copy a 2-dimensions array of ints into the {@code dst} {@link NdArray} + * + * @param dst destination rank-2 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-2 or has an incompatible shape + * with the source array + */ + public static void copyTo(IntNdArray dst, int[][] array) { + dst.elements(0).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]]).copyTo(e) + ); + } + + /** + * Copy a 3-dimensions array of ints into the {@code dst} {@link NdArray} + * + * @param dst destination rank-3 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-3 or has an incompatible shape + * with the source array + */ + public static void copyTo(IntNdArray dst, int[][][] array) { + dst.elements(1).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]]).copyTo(e) + ); + } + + /** + * Copy a 4-dimensions array of ints into the {@code dst} {@link NdArray} + * + * @param dst destination rank-4 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-4 or has an incompatible shape + * with the source array + */ + public static void copyTo(IntNdArray dst, int[][][][] array) { + dst.elements(2).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]]).copyTo(e) + ); + } + + /** + * Copy a 5-dimensions array of ints into the {@code dst} {@link NdArray} + * + * @param dst destination rank-5 array + * @param array source array + * @throws IllegalArgumentException if {@code 
dst} is not of rank-5 or has an incompatible shape + * with the source array + */ + public static void copyTo(IntNdArray dst, int[][][][][] array) { + dst.elements(3).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]]).copyTo(e) + ); + } + + /** + * Copy a 6-dimensions array of ints into the {@code dst} {@link NdArray} + * + * @param dst destination rank-6 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-6 or has an incompatible shape + * with the source array + */ + public static void copyTo(IntNdArray dst, int[][][][][][] array) { + dst.elements(4).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]][(int)idx[4]]).copyTo(e) + ); + } + + /** + * Copy a single-dimension array of longs into the {@code dst} {@link NdArray} + * + * @param dst destination rank-1 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-1 or has an incompatible shape + * with the source array + */ + public static void copyTo(LongNdArray dst, long[] array) { + vectorOf(array).copyTo(dst); + } + + /** + * Copy a 2-dimensions array of longs into the {@code dst} {@link NdArray} + * + * @param dst destination rank-2 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-2 or has an incompatible shape + * with the source array + */ + public static void copyTo(LongNdArray dst, long[][] array) { + dst.elements(0).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]]).copyTo(e) + ); + } + + /** + * Copy a 3-dimensions array of longs into the {@code dst} {@link NdArray} + * + * @param dst destination rank-3 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-3 or has an incompatible shape + * with the source array + */ + public static void copyTo(LongNdArray dst, long[][][] array) { + 
dst.elements(1).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]]).copyTo(e) + ); + } + + /** + * Copy a 4-dimensions array of longs into the {@code dst} {@link NdArray} + * + * @param dst destination rank-4 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-4 or has an incompatible shape + * with the source array + */ + public static void copyTo(LongNdArray dst, long[][][][] array) { + dst.elements(2).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]]).copyTo(e) + ); + } + + /** + * Copy a 5-dimensions array of longs into the {@code dst} {@link NdArray} + * + * @param dst destination rank-5 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-5 or has an incompatible shape + * with the source array + */ + public static void copyTo(LongNdArray dst, long[][][][][] array) { + dst.elements(3).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]]).copyTo(e) + ); + } + + /** + * Copy a 6-dimensions array of longs into the {@code dst} {@link NdArray} + * + * @param dst destination rank-6 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-6 or has an incompatible shape + * with the source array + */ + public static void copyTo(LongNdArray dst, long[][][][][][] array) { + dst.elements(4).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]][(int)idx[4]]).copyTo(e) + ); + } + + /** + * Copy a single-dimension array of floats into the {@code dst} {@link NdArray} + * + * @param dst destination rank-1 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-1 or has an incompatible shape + * with the source array + */ + public static void copyTo(FloatNdArray dst, float[] array) { + vectorOf(array).copyTo(dst); + } + + /** + * Copy a 2-dimensions 
array of floats into the {@code dst} {@link NdArray} + * + * @param dst destination rank-2 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-2 or has an incompatible shape + * with the source array + */ + public static void copyTo(FloatNdArray dst, float[][] array) { + dst.elements(0).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]]).copyTo(e) + ); + } + + /** + * Copy a 3-dimensions array of floats into the {@code dst} {@link NdArray} + * + * @param dst destination rank-3 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-3 or has an incompatible shape + * with the source array + */ + public static void copyTo(FloatNdArray dst, float[][][] array) { + dst.elements(1).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]]).copyTo(e) + ); + } + + /** + * Copy a 4-dimensions array of floats into the {@code dst} {@link NdArray} + * + * @param dst destination rank-4 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-4 or has an incompatible shape + * with the source array + */ + public static void copyTo(FloatNdArray dst, float[][][][] array) { + dst.elements(2).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]]).copyTo(e) + ); + } + + /** + * Copy a 5-dimensions array of floats into the {@code dst} {@link NdArray} + * + * @param dst destination rank-5 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-5 or has an incompatible shape + * with the source array + */ + public static void copyTo(FloatNdArray dst, float[][][][][] array) { + dst.elements(3).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]]).copyTo(e) + ); + } + + /** + * Copy a 6-dimensions array of floats into the {@code dst} {@link NdArray} + * + * @param dst destination rank-6 array + * @param array 
source array + * @throws IllegalArgumentException if {@code dst} is not of rank-6 or has an incompatible shape + * with the source array + */ + public static void copyTo(FloatNdArray dst, float[][][][][][] array) { + dst.elements(4).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]][(int)idx[4]]).copyTo(e) + ); + } + + /** + * Copy a single-dimension array of doubles into the {@code dst} {@link NdArray} + * + * @param dst destination rank-1 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-1 or has an incompatible shape + * with the source array + */ + public static void copyTo(DoubleNdArray dst, double[] array) { + vectorOf(array).copyTo(dst); + } + + /** + * Copy a 2-dimensions array of doubles into the {@code dst} {@link NdArray} + * + * @param dst destination rank-2 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-2 or has an incompatible shape + * with the source array + */ + public static void copyTo(DoubleNdArray dst, double[][] array) { + dst.elements(0).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]]).copyTo(e) + ); + } + + /** + * Copy a 3-dimensions array of doubles into the {@code dst} {@link NdArray} + * + * @param dst destination rank-3 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-3 or has an incompatible shape + * with the source array + */ + public static void copyTo(DoubleNdArray dst, double[][][] array) { + dst.elements(1).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]]).copyTo(e) + ); + } + + /** + * Copy a 4-dimensions array of doubles into the {@code dst} {@link NdArray} + * + * @param dst destination rank-4 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-4 or has an incompatible shape + * with the source array + */ + public static void 
copyTo(DoubleNdArray dst, double[][][][] array) { + dst.elements(2).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]]).copyTo(e) + ); + } + + /** + * Copy a 5-dimensions array of doubles into the {@code dst} {@link NdArray} + * + * @param dst destination rank-5 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-5 or has an incompatible shape + * with the source array + */ + public static void copyTo(DoubleNdArray dst, double[][][][][] array) { + dst.elements(3).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]]).copyTo(e) + ); + } + + /** + * Copy a 6-dimensions array of doubles into the {@code dst} {@link NdArray} + * + * @param dst destination rank-6 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-6 or has an incompatible shape + * with the source array + */ + public static void copyTo(DoubleNdArray dst, double[][][][][][] array) { + dst.elements(4).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]][(int)idx[4]]).copyTo(e) + ); + } + + /** + * Copy a single-dimension array of bytes into the {@code dst} {@link NdArray} + * + * @param dst destination rank-1 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-1 or has an incompatible shape + * with the source array + */ + public static void copyTo(ByteNdArray dst, byte[] array) { + vectorOf(array).copyTo(dst); + } + + /** + * Copy a 2-dimensions array of bytes into the {@code dst} {@link NdArray} + * + * @param dst destination rank-2 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-2 or has an incompatible shape + * with the source array + */ + public static void copyTo(ByteNdArray dst, byte[][] array) { + dst.elements(0).forEachIndexed((idx, e) -> + 
vectorOf(array[(int)idx[0]]).copyTo(e) + ); + } + + /** + * Copy a 3-dimensions array of bytes into the {@code dst} {@link NdArray} + * + * @param dst destination rank-3 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-3 or has an incompatible shape + * with the source array + */ + public static void copyTo(ByteNdArray dst, byte[][][] array) { + dst.elements(1).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]]).copyTo(e) + ); + } + + /** + * Copy a 4-dimensions array of bytes into the {@code dst} {@link NdArray} + * + * @param dst destination rank-4 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-4 or has an incompatible shape + * with the source array + */ + public static void copyTo(ByteNdArray dst, byte[][][][] array) { + dst.elements(2).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]]).copyTo(e) + ); + } + + /** + * Copy a 5-dimensions array of bytes into the {@code dst} {@link NdArray} + * + * @param dst destination rank-5 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-5 or has an incompatible shape + * with the source array + */ + public static void copyTo(ByteNdArray dst, byte[][][][][] array) { + dst.elements(3).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]]).copyTo(e) + ); + } + + /** + * Copy a 6-dimensions array of bytes into the {@code dst} {@link NdArray} + * + * @param dst destination rank-6 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-6 or has an incompatible shape + * with the source array + */ + public static void copyTo(ByteNdArray dst, byte[][][][][][] array) { + dst.elements(4).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]][(int)idx[4]]).copyTo(e) + ); + } + + /** + * Copy a 
single-dimension array of shorts into the {@code dst} {@link NdArray} + * + * @param dst destination rank-1 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-1 or has an incompatible shape + * with the source array + */ + public static void copyTo(ShortNdArray dst, short[] array) { + vectorOf(array).copyTo(dst); + } + + /** + * Copy a 2-dimensions array of shorts into the {@code dst} {@link NdArray} + * + * @param dst destination rank-2 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-2 or has an incompatible shape + * with the source array + */ + public static void copyTo(ShortNdArray dst, short[][] array) { + dst.elements(0).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]]).copyTo(e) + ); + } + + /** + * Copy a 3-dimensions array of shorts into the {@code dst} {@link NdArray} + * + * @param dst destination rank-3 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-3 or has an incompatible shape + * with the source array + */ + public static void copyTo(ShortNdArray dst, short[][][] array) { + dst.elements(1).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]]).copyTo(e) + ); + } + + /** + * Copy a 4-dimensions array of shorts into the {@code dst} {@link NdArray} + * + * @param dst destination rank-4 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-4 or has an incompatible shape + * with the source array + */ + public static void copyTo(ShortNdArray dst, short[][][][] array) { + dst.elements(2).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]]).copyTo(e) + ); + } + + /** + * Copy a 5-dimensions array of shorts into the {@code dst} {@link NdArray} + * + * @param dst destination rank-5 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-5 or has an 
incompatible shape + * with the source array + */ + public static void copyTo(ShortNdArray dst, short[][][][][] array) { + dst.elements(3).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]]).copyTo(e) + ); + } + + /** + * Copy a 6-dimensions array of shorts into the {@code dst} {@link NdArray} + * + * @param dst destination rank-6 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-6 or has an incompatible shape + * with the source array + */ + public static void copyTo(ShortNdArray dst, short[][][][][][] array) { + dst.elements(4).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]][(int)idx[4]]).copyTo(e) + ); + } + + /** + * Copy a single-dimension array of booleans into the {@code dst} {@link NdArray} + * + * @param dst destination rank-1 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-1 or has an incompatible shape + * with the source array + */ + public static void copyTo(BooleanNdArray dst, boolean[] array) { + vectorOf(array).copyTo(dst); + } + + /** + * Copy a 2-dimensions array of booleans into the {@code dst} {@link NdArray} + * + * @param dst destination rank-2 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-2 or has an incompatible shape + * with the source array + */ + public static void copyTo(BooleanNdArray dst, boolean[][] array) { + dst.elements(0).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]]).copyTo(e) + ); + } + + /** + * Copy a 3-dimensions array of booleans into the {@code dst} {@link NdArray} + * + * @param dst destination rank-3 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-3 or has an incompatible shape + * with the source array + */ + public static void copyTo(BooleanNdArray dst, boolean[][][] array) { + 
dst.elements(1).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]]).copyTo(e) + ); + } + + /** + * Copy a 4-dimensions array of booleans into the {@code dst} {@link NdArray} + * + * @param dst destination rank-4 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-4 or has an incompatible shape + * with the source array + */ + public static void copyTo(BooleanNdArray dst, boolean[][][][] array) { + dst.elements(2).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]]).copyTo(e) + ); + } + + /** + * Copy a 5-dimensions array of booleans into the {@code dst} {@link NdArray} + * + * @param dst destination rank-5 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-5 or has an incompatible shape + * with the source array + */ + public static void copyTo(BooleanNdArray dst, boolean[][][][][] array) { + dst.elements(3).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]]).copyTo(e) + ); + } + + /** + * Copy a 6-dimensions array of booleans into the {@code dst} {@link NdArray} + * + * @param dst destination rank-6 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-6 or has an incompatible shape + * with the source array + */ + public static void copyTo(BooleanNdArray dst, boolean[][][][][][] array) { + dst.elements(4).forEachIndexed((idx, e) -> + vectorOf(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]][(int)idx[4]]).copyTo(e) + ); + } + + /** + * Copy a single-dimension array of objects into the {@code dst} {@link NdArray} + * + * @param dst destination rank-1 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-1 or has an incompatible shape + * with the source array + */ + public static void copyTo(NdArray dst, T[] array) { + vectorOfObjects(array).copyTo(dst); + } + + /** 
+ * Copy a 2-dimensions array of objects into the {@code dst} {@link NdArray} + * + * @param dst destination rank-2 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-2 or has an incompatible shape + * with the source array + */ + public static void copyTo(NdArray dst, T[][] array) { + dst.elements(0).forEachIndexed((idx, e) -> + vectorOfObjects(array[(int)idx[0]]).copyTo(e) + ); + } + + /** + * Copy a 3-dimensions array of objects into the {@code dst} {@link NdArray} + * + * @param dst destination rank-3 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-3 or has an incompatible shape + * with the source array + */ + public static void copyTo(NdArray dst, T[][][] array) { + dst.elements(1).forEachIndexed((idx, e) -> + vectorOfObjects(array[(int)idx[0]][(int)idx[1]]).copyTo(e) + ); + } + + /** + * Copy a 4-dimensions array of objects into the {@code dst} {@link NdArray} + * + * @param dst destination rank-4 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-4 or has an incompatible shape + * with the source array + */ + public static void copyTo(NdArray dst, T[][][][] array) { + dst.elements(2).forEachIndexed((idx, e) -> + vectorOfObjects(array[(int)idx[0]][(int)idx[1]][(int)idx[2]]).copyTo(e) + ); + } + + /** + * Copy a 5-dimensions array of objects into the {@code dst} {@link NdArray} + * + * @param dst destination rank-5 array + * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-5 or has an incompatible shape + * with the source array + */ + public static void copyTo(NdArray dst, T[][][][][] array) { + dst.elements(3).forEachIndexed((idx, e) -> + vectorOfObjects(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]]).copyTo(e) + ); + } + + /** + * Copy a 6-dimensions array of objects into the {@code dst} {@link NdArray} + * + * @param dst destination rank-6 array 
+ * @param array source array + * @throws IllegalArgumentException if {@code dst} is not of rank-6 or has an incompatible shape + * with the source array + */ + public static void copyTo(NdArray dst, T[][][][][][] array) { + dst.elements(4).forEachIndexed((idx, e) -> + vectorOfObjects(array[(int)idx[0]][(int)idx[1]][(int)idx[2]][(int)idx[3]][(int)idx[4]]).copyTo(e) + ); + } + + /** + * Compute the shape of a single-dimension int array. + * + * @param array 1D array + * @return shape of the array + */ + public static Shape shapeOf(int[] array) { + return Shape.of(array.length); + } + + /** + * Compute the shape of a 2-dimensions int array. + * + * @param array 2D array + * @return shape of the array + */ + public static Shape shapeOf(int[][] array) { + return Shape.of(computeShape(array, new long[2])); + } + + /** + * Compute the shape of a 3-dimensions int array. + * + * @param array 3D array + * @return shape of the array + */ + public static Shape shapeOf(int[][][] array) { + return Shape.of(computeShape(array, new long[3])); + } + + /** + * Compute the shape of a 4-dimensions int array. + * + * @param array 4D array + * @return shape of the array + */ + public static Shape shapeOf(int[][][][] array) { + return Shape.of(computeShape(array, new long[4])); + } + + /** + * Compute the shape of a 5-dimensions int array. + * + * @param array 5D array + * @return shape of the array + */ + public static Shape shapeOf(int[][][][][] array) { + return Shape.of(computeShape(array, new long[5])); + } + + /** + * Compute the shape of a 6-dimensions int array. + * + * @param array 6D array + * @return shape of the array + */ + public static Shape shapeOf(int[][][][][][] array) { + return Shape.of(computeShape(array, new long[6])); + } + + /** + * Compute the shape of a single-dimension long array. 
+ * + * @param array 1D array + * @return shape of the array + */ + public static Shape shapeOf(long[] array) { + return Shape.of(array.length); + } + + /** + * Compute the shape of a 2-dimensions long array. + * + * @param array 2D array + * @return shape of the array + */ + public static Shape shapeOf(long[][] array) { + return Shape.of(computeShape(array, new long[2])); + } + + /** + * Compute the shape of a 3-dimensions long array. + * + * @param array 3D array + * @return shape of the array + */ + public static Shape shapeOf(long[][][] array) { + return Shape.of(computeShape(array, new long[3])); + } + + /** + * Compute the shape of a 4-dimensions long array. + * + * @param array 4D array + * @return shape of the array + */ + public static Shape shapeOf(long[][][][] array) { + return Shape.of(computeShape(array, new long[4])); + } + + /** + * Compute the shape of a 5-dimensions long array. + * + * @param array 5D array + * @return shape of the array + */ + public static Shape shapeOf(long[][][][][] array) { + return Shape.of(computeShape(array, new long[5])); + } + + /** + * Compute the shape of a 6-dimensions long array. + * + * @param array 6D array + * @return shape of the array + */ + public static Shape shapeOf(long[][][][][][] array) { + return Shape.of(computeShape(array, new long[6])); + } + + /** + * Compute the shape of a single-dimension float array. + * + * @param array 1D array + * @return shape of the array + */ + public static Shape shapeOf(float[] array) { + return Shape.of(array.length); + } + + /** + * Compute the shape of a 2-dimensions float array. + * + * @param array 2D array + * @return shape of the array + */ + public static Shape shapeOf(float[][] array) { + return Shape.of(computeShape(array, new long[2])); + } + + /** + * Compute the shape of a 3-dimensions float array. 
+ * + * @param array 3D array + * @return shape of the array + */ + public static Shape shapeOf(float[][][] array) { + return Shape.of(computeShape(array, new long[3])); + } + + /** + * Compute the shape of a 4-dimensions float array. + * + * @param array 4D array + * @return shape of the array + */ + public static Shape shapeOf(float[][][][] array) { + return Shape.of(computeShape(array, new long[4])); + } + + /** + * Compute the shape of a 5-dimensions float array. + * + * @param array 5D array + * @return shape of the array + */ + public static Shape shapeOf(float[][][][][] array) { + return Shape.of(computeShape(array, new long[5])); + } + + /** + * Compute the shape of a 6-dimensions float array. + * + * @param array 6D array + * @return shape of the array + */ + public static Shape shapeOf(float[][][][][][] array) { + return Shape.of(computeShape(array, new long[6])); + } + + /** + * Compute the shape of a single-dimension double array. + * + * @param array 1D array + * @return shape of the array + */ + public static Shape shapeOf(double[] array) { + return Shape.of(array.length); + } + + /** + * Compute the shape of a 2-dimensions double array. + * + * @param array 2D array + * @return shape of the array + */ + public static Shape shapeOf(double[][] array) { + return Shape.of(computeShape(array, new long[2])); + } + + /** + * Compute the shape of a 3-dimensions double array. + * + * @param array 3D array + * @return shape of the array + */ + public static Shape shapeOf(double[][][] array) { + return Shape.of(computeShape(array, new long[3])); + } + + /** + * Compute the shape of a 4-dimensions double array. + * + * @param array 4D array + * @return shape of the array + */ + public static Shape shapeOf(double[][][][] array) { + return Shape.of(computeShape(array, new long[4])); + } + + /** + * Compute the shape of a 5-dimensions double array. 
+ * + * @param array 5D array + * @return shape of the array + */ + public static Shape shapeOf(double[][][][][] array) { + return Shape.of(computeShape(array, new long[5])); + } + + /** + * Compute the shape of a 6-dimensions double array. + * + * @param array 6D array + * @return shape of the array + */ + public static Shape shapeOf(double[][][][][][] array) { + return Shape.of(computeShape(array, new long[6])); + } + + /** + * Compute the shape of a single-dimension byte array. + * + * @param array 1D array + * @return shape of the array + */ + public static Shape shapeOf(byte[] array) { + return Shape.of(array.length); + } + + /** + * Compute the shape of a 2-dimensions byte array. + * + * @param array 2D array + * @return shape of the array + */ + public static Shape shapeOf(byte[][] array) { + return Shape.of(computeShape(array, new long[2])); + } + + /** + * Compute the shape of a 3-dimensions byte array. + * + * @param array 3D array + * @return shape of the array + */ + public static Shape shapeOf(byte[][][] array) { + return Shape.of(computeShape(array, new long[3])); + } + + /** + * Compute the shape of a 4-dimensions byte array. + * + * @param array 4D array + * @return shape of the array + */ + public static Shape shapeOf(byte[][][][] array) { + return Shape.of(computeShape(array, new long[4])); + } + + /** + * Compute the shape of a 5-dimensions byte array. + * + * @param array 5D array + * @return shape of the array + */ + public static Shape shapeOf(byte[][][][][] array) { + return Shape.of(computeShape(array, new long[5])); + } + + /** + * Compute the shape of a 6-dimensions byte array. + * + * @param array 6D array + * @return shape of the array + */ + public static Shape shapeOf(byte[][][][][][] array) { + return Shape.of(computeShape(array, new long[6])); + } + + /** + * Compute the shape of a single-dimension short array. 
+ * + * @param array 1D array + * @return shape of the array + */ + public static Shape shapeOf(short[] array) { + return Shape.of(array.length); + } + + /** + * Compute the shape of a 2-dimensions short array. + * + * @param array 2D array + * @return shape of the array + */ + public static Shape shapeOf(short[][] array) { + return Shape.of(computeShape(array, new long[2])); + } + + /** + * Compute the shape of a 3-dimensions short array. + * + * @param array 3D array + * @return shape of the array + */ + public static Shape shapeOf(short[][][] array) { + return Shape.of(computeShape(array, new long[3])); + } + + /** + * Compute the shape of a 4-dimensions short array. + * + * @param array 4D array + * @return shape of the array + */ + public static Shape shapeOf(short[][][][] array) { + return Shape.of(computeShape(array, new long[4])); + } + + /** + * Compute the shape of a 5-dimensions short array. + * + * @param array 5D array + * @return shape of the array + */ + public static Shape shapeOf(short[][][][][] array) { + return Shape.of(computeShape(array, new long[5])); + } + + /** + * Compute the shape of a 6-dimensions short array. + * + * @param array 6D array + * @return shape of the array + */ + public static Shape shapeOf(short[][][][][][] array) { + return Shape.of(computeShape(array, new long[6])); + } + + /** + * Compute the shape of a single-dimension boolean array. + * + * @param array 1D array + * @return shape of the array + */ + public static Shape shapeOf(boolean[] array) { + return Shape.of(array.length); + } + + /** + * Compute the shape of a 2-dimensions boolean array. + * + * @param array 2D array + * @return shape of the array + */ + public static Shape shapeOf(boolean[][] array) { + return Shape.of(computeShape(array, new long[2])); + } + + /** + * Compute the shape of a 3-dimensions boolean array. 
+ * + * @param array 3D array + * @return shape of the array + */ + public static Shape shapeOf(boolean[][][] array) { + return Shape.of(computeShape(array, new long[3])); + } + + /** + * Compute the shape of a 4-dimensions boolean array. + * + * @param array 4D array + * @return shape of the array + */ + public static Shape shapeOf(boolean[][][][] array) { + return Shape.of(computeShape(array, new long[4])); + } + + /** + * Compute the shape of a 5-dimensions boolean array. + * + * @param array 5D array + * @return shape of the array + */ + public static Shape shapeOf(boolean[][][][][] array) { + return Shape.of(computeShape(array, new long[5])); + } + + /** + * Compute the shape of a 6-dimensions boolean array. + * + * @param array 6D array + * @return shape of the array + */ + public static Shape shapeOf(boolean[][][][][][] array) { + return Shape.of(computeShape(array, new long[6])); + } + + /** + * Compute the shape of a single-dimension object array. + * + * @param array 1D array + * @return shape of the array + */ + public static Shape shapeOf(T[] array) { + return Shape.of(array.length); + } + + /** + * Compute the shape of a 2-dimensions object array. + * + * @param array 2D array + * @return shape of the array + */ + public static Shape shapeOf(T[][] array) { + return Shape.of(computeShape(array, new long[2])); + } + + /** + * Compute the shape of a 3-dimensions object array. + * + * @param array 3D array + * @return shape of the array + */ + public static Shape shapeOf(T[][][] array) { + return Shape.of(computeShape(array, new long[3])); + } + + /** + * Compute the shape of a 4-dimensions object array. + * + * @param array 4D array + * @return shape of the array + */ + public static Shape shapeOf(T[][][][] array) { + return Shape.of(computeShape(array, new long[4])); + } + + /** + * Compute the shape of a 5-dimensions object array. 
+ * + * @param array 5D array + * @return shape of the array + */ + public static Shape shapeOf(T[][][][][] array) { + return Shape.of(computeShape(array, new long[5])); + } + + /** + * Compute the shape of a 6-dimensions object array. + * + * @param array 6D array + * @return shape of the array + */ + public static Shape shapeOf(T[][][][][][] array) { + return Shape.of(computeShape(array, new long[6])); + } + + private static void dimSize(int arrayLength, long[] shape, int dimIdx) { + if (shape[dimIdx] == 0) { + shape[dimIdx] = arrayLength; + } else if (shape[dimIdx] != arrayLength) { + shape[dimIdx] = Shape.UNKNOWN_SIZE; + } + } + + private static long[] computeShape(int[][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 2); + for (int i = 0; i < array.length; ++i) { + dimSize(array[i].length, shape, shape.length - 1); + } + return shape; + } + + private static long[] computeShape(int[][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 3); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(int[][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 4); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(int[][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 5); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(int[][][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 6); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(long[][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 2); + for (int i = 0; i < array.length; ++i) { + dimSize(array[i].length, shape, shape.length - 1); + } 
+ return shape; + } + + private static long[] computeShape(long[][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 3); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(long[][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 4); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(long[][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 5); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(long[][][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 6); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(float[][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 2); + for (int i = 0; i < array.length; ++i) { + dimSize(array[i].length, shape, shape.length - 1); + } + return shape; + } + + private static long[] computeShape(float[][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 3); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(float[][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 4); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(float[][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 5); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(float[][][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 6); + for (int i = 0; i < 
array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(double[][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 2); + for (int i = 0; i < array.length; ++i) { + dimSize(array[i].length, shape, shape.length - 1); + } + return shape; + } + + private static long[] computeShape(double[][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 3); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(double[][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 4); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(double[][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 5); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(double[][][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 6); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(byte[][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 2); + for (int i = 0; i < array.length; ++i) { + dimSize(array[i].length, shape, shape.length - 1); + } + return shape; + } + + private static long[] computeShape(byte[][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 3); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(byte[][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 4); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(byte[][][][][] array, long[] shape) { 
+ dimSize(array.length, shape, shape.length - 5); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(byte[][][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 6); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(short[][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 2); + for (int i = 0; i < array.length; ++i) { + dimSize(array[i].length, shape, shape.length - 1); + } + return shape; + } + + private static long[] computeShape(short[][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 3); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(short[][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 4); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(short[][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 5); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(short[][][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 6); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(boolean[][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 2); + for (int i = 0; i < array.length; ++i) { + dimSize(array[i].length, shape, shape.length - 1); + } + return shape; + } + + private static long[] computeShape(boolean[][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 3); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } 
+ + private static long[] computeShape(boolean[][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 4); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(boolean[][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 5); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(boolean[][][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 6); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(T[][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 2); + for (int i = 0; i < array.length; ++i) { + dimSize(array[i].length, shape, shape.length - 1); + } + return shape; + } + + private static long[] computeShape(T[][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 3); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(T[][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 4); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(T[][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 5); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } + + private static long[] computeShape(T[][][][][][] array, long[] shape) { + dimSize(array.length, shape, shape.length - 6); + for (int i = 0; i < array.length; ++i) { + computeShape(array[i], shape); + } + return shape; + } +} diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/AbstractNdArray.java 
b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/AbstractNdArray.java index 90534a037d4..a1eee8e5c24 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/AbstractNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/AbstractNdArray.java @@ -17,7 +17,6 @@ package org.tensorflow.tools.ndarray.impl; import org.tensorflow.tools.Shape; -import org.tensorflow.tools.buffer.DataBuffers; import org.tensorflow.tools.ndarray.NdArray; import org.tensorflow.tools.ndarray.NdArraySequence; import org.tensorflow.tools.ndarray.impl.dimension.DimensionalSpace; @@ -51,26 +50,6 @@ public NdArraySequence scalars() { return ElementSequence.create(this, shape().numDimensions() - 1); // negative if this array is a scalar } - @Override - public U read(T[] dst) { - return (U)read(DataBuffers.from(dst, false, false)); - } - - @Override - public U read(T[] dst, int offset) { - return (U)read(DataBuffers.from(dst, false, false).offset(offset)); - } - - @Override - public U write(T[] src) { - return (U)write(DataBuffers.from(src, true, false)); - } - - @Override - public U write(T[] src, int offset) { - return (U)write(DataBuffers.from(src, true, false).offset(offset)); - } - protected AbstractNdArray(DimensionalSpace dimensions) { this.dimensions = dimensions; } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/Validator.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/Validator.java index 67172350b0c..b8e7cd2fdbe 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/Validator.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/Validator.java @@ -23,20 +23,6 @@ public class Validator { - public static void getArrayArgs(NdArray ndArray, int arrayLength, int arrayOffset) { - copyArrayArgs(arrayLength, arrayOffset); - if (arrayLength - arrayOffset < ndArray.size()) { - throw new BufferOverflowException(); - } - } - - public 
static void putArrayArgs(NdArray ndArray, int arrayLength, int arrayOffset) { - copyArrayArgs(arrayLength, arrayOffset); - if (arrayLength - arrayOffset < ndArray.size()) { - throw new BufferUnderflowException(); - } - } - public static void copyToNdArrayArgs(NdArray ndArray, NdArray otherNdArray) { if (!ndArray.shape().equals(otherNdArray.shape())) { throw new IllegalArgumentException("Can only copy to arrays of the same shape (" + diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/BooleanDenseNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/BooleanDenseNdArray.java index 990353eafd0..38758e41ada 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/BooleanDenseNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/BooleanDenseNdArray.java @@ -19,7 +19,6 @@ import org.tensorflow.tools.Shape; import org.tensorflow.tools.buffer.BooleanDataBuffer; import org.tensorflow.tools.buffer.DataBuffer; -import org.tensorflow.tools.buffer.DataBuffers; import org.tensorflow.tools.ndarray.BooleanNdArray; import org.tensorflow.tools.ndarray.NdArray; import org.tensorflow.tools.ndarray.impl.dimension.DimensionalSpace; @@ -43,18 +42,6 @@ public BooleanNdArray setBoolean(boolean value, long... 
indices) { return this; } - @Override - public BooleanNdArray read(boolean[] dst, int offset) { - Validator.getArrayArgs(this, dst.length, offset); - return read(DataBuffers.from(dst, false, false).offset(offset)); - } - - @Override - public BooleanNdArray write(boolean[] src, int offset) { - Validator.putArrayArgs(this, src.length, offset); - return write(DataBuffers.from(src, true, false).offset(offset)); - } - @Override public BooleanNdArray copyTo(NdArray dst) { Validator.copyToNdArrayArgs(this, dst); diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/ByteDenseNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/ByteDenseNdArray.java index e00628d95df..77735ef6bc6 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/ByteDenseNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/ByteDenseNdArray.java @@ -19,7 +19,6 @@ import org.tensorflow.tools.Shape; import org.tensorflow.tools.buffer.ByteDataBuffer; import org.tensorflow.tools.buffer.DataBuffer; -import org.tensorflow.tools.buffer.DataBuffers; import org.tensorflow.tools.ndarray.ByteNdArray; import org.tensorflow.tools.ndarray.NdArray; import org.tensorflow.tools.ndarray.impl.dimension.DimensionalSpace; @@ -43,18 +42,6 @@ public ByteNdArray setByte(byte value, long... 
indices) { return this; } - @Override - public ByteNdArray read(byte[] dst, int offset) { - Validator.getArrayArgs(this, dst.length, offset); - return read(DataBuffers.from(dst, false, false).offset(offset)); - } - - @Override - public ByteNdArray write(byte[] src, int offset) { - Validator.putArrayArgs(this, src.length, offset); - return write(DataBuffers.from(src, true, false).offset(offset)); - } - @Override public ByteNdArray copyTo(NdArray dst) { Validator.copyToNdArrayArgs(this, dst); diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/DoubleDenseNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/DoubleDenseNdArray.java index 63b58b309af..0f1e91fc31c 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/DoubleDenseNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/DoubleDenseNdArray.java @@ -18,7 +18,6 @@ import org.tensorflow.tools.Shape; import org.tensorflow.tools.buffer.DataBuffer; -import org.tensorflow.tools.buffer.DataBuffers; import org.tensorflow.tools.buffer.DoubleDataBuffer; import org.tensorflow.tools.ndarray.DoubleNdArray; import org.tensorflow.tools.ndarray.NdArray; @@ -43,18 +42,6 @@ public DoubleNdArray setDouble(double value, long... 
indices) { return this; } - @Override - public DoubleNdArray read(double[] dst, int offset) { - Validator.getArrayArgs(this, dst.length, offset); - return read(DataBuffers.from(dst, false, false).offset(offset)); - } - - @Override - public DoubleNdArray write(double[] src, int offset) { - Validator.putArrayArgs(this, src.length, offset); - return write(DataBuffers.from(src, true, false).offset(offset)); - } - @Override public DoubleNdArray copyTo(NdArray dst) { Validator.copyToNdArrayArgs(this, dst); diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/FloatDenseNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/FloatDenseNdArray.java index d37502f2b92..dd159f1a22a 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/FloatDenseNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/FloatDenseNdArray.java @@ -18,7 +18,6 @@ import org.tensorflow.tools.Shape; import org.tensorflow.tools.buffer.DataBuffer; -import org.tensorflow.tools.buffer.DataBuffers; import org.tensorflow.tools.buffer.FloatDataBuffer; import org.tensorflow.tools.ndarray.FloatNdArray; import org.tensorflow.tools.ndarray.NdArray; @@ -43,18 +42,6 @@ public FloatNdArray setFloat(float value, long... 
indices) { return this; } - @Override - public FloatNdArray read(float[] dst, int offset) { - Validator.getArrayArgs(this, dst.length, offset); - return read(DataBuffers.from(dst, false, false).offset(offset)); - } - - @Override - public FloatNdArray write(float[] src, int offset) { - Validator.putArrayArgs(this, src.length, offset); - return write(DataBuffers.from(src, true, false).offset(offset)); - } - @Override public FloatNdArray copyTo(NdArray dst) { Validator.copyToNdArrayArgs(this, dst); diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/IntDenseNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/IntDenseNdArray.java index bb7682a174c..567e28a97ca 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/IntDenseNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/IntDenseNdArray.java @@ -18,7 +18,6 @@ import org.tensorflow.tools.Shape; import org.tensorflow.tools.buffer.DataBuffer; -import org.tensorflow.tools.buffer.DataBuffers; import org.tensorflow.tools.buffer.IntDataBuffer; import org.tensorflow.tools.ndarray.IntNdArray; import org.tensorflow.tools.ndarray.NdArray; @@ -43,18 +42,6 @@ public IntNdArray setInt(int value, long... 
indices) { return this; } - @Override - public IntNdArray read(int[] dst, int offset) { - Validator.getArrayArgs(this, dst.length, offset); - return read(DataBuffers.from(dst, false, false).offset(offset)); - } - - @Override - public IntNdArray write(int[] src, int offset) { - Validator.putArrayArgs(this, src.length, offset); - return write(DataBuffers.from(src, true, false).offset(offset)); - } - @Override public IntNdArray copyTo(NdArray dst) { Validator.copyToNdArrayArgs(this, dst); diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/LongDenseNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/LongDenseNdArray.java index d702d092b66..1ed4852d20a 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/LongDenseNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/LongDenseNdArray.java @@ -18,7 +18,6 @@ import org.tensorflow.tools.Shape; import org.tensorflow.tools.buffer.DataBuffer; -import org.tensorflow.tools.buffer.DataBuffers; import org.tensorflow.tools.buffer.LongDataBuffer; import org.tensorflow.tools.ndarray.LongNdArray; import org.tensorflow.tools.ndarray.NdArray; @@ -43,18 +42,6 @@ public LongNdArray setLong(long value, long... 
indices) { return this; } - @Override - public LongNdArray read(long[] dst, int offset) { - Validator.getArrayArgs(this, dst.length, offset); - return read(DataBuffers.from(dst, false, false).offset(offset)); - } - - @Override - public LongNdArray write(long[] src, int offset) { - Validator.putArrayArgs(this, src.length, offset); - return write(DataBuffers.from(src, true, false).offset(offset)); - } - @Override public LongNdArray copyTo(NdArray dst) { Validator.copyToNdArrayArgs(this, dst); diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/ShortDenseNdArray.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/ShortDenseNdArray.java index c38e02904ee..a0f6bc0a6cf 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/ShortDenseNdArray.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dense/ShortDenseNdArray.java @@ -18,7 +18,6 @@ import org.tensorflow.tools.Shape; import org.tensorflow.tools.buffer.DataBuffer; -import org.tensorflow.tools.buffer.DataBuffers; import org.tensorflow.tools.buffer.ShortDataBuffer; import org.tensorflow.tools.ndarray.NdArray; import org.tensorflow.tools.ndarray.ShortNdArray; @@ -43,18 +42,6 @@ public ShortNdArray setShort(short value, long... 
indices) { return this; } - @Override - public ShortNdArray read(short[] dst, int offset) { - Validator.getArrayArgs(this, dst.length, offset); - return read(DataBuffers.from(dst, false, false).offset(offset)); - } - - @Override - public ShortNdArray write(short[] src, int offset) { - Validator.putArrayArgs(this, src.length, offset); - return write(DataBuffers.from(src, true, false).offset(offset)); - } - @Override public ShortNdArray copyTo(NdArray dst) { Validator.copyToNdArrayArgs(this, dst); diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dimension/DimensionalSpace.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dimension/DimensionalSpace.java index 9b4aa2f26f0..9211e837946 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dimension/DimensionalSpace.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/dimension/DimensionalSpace.java @@ -164,6 +164,6 @@ private static Shape shape(Dimension[] dimensions) { for (Dimension dimension : dimensions) { shapeDimSizes[i++] = dimension.numElements(); } - return Shape.make(shapeDimSizes); + return Shape.of(shapeDimSizes); } } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/sequence/NdPositionIterator.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/sequence/NdPositionIterator.java index 9b9aac8c411..f43d740daf6 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/sequence/NdPositionIterator.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/sequence/NdPositionIterator.java @@ -17,6 +17,7 @@ package org.tensorflow.tools.ndarray.impl.sequence; +import java.util.NoSuchElementException; import org.tensorflow.tools.ndarray.impl.dimension.DimensionalSpace; class NdPositionIterator implements IndexedPositionIterator { @@ -28,6 +29,9 @@ public boolean hasNext() { @Override public long nextLong() { + if (!hasNext()) { + throw 
new NoSuchElementException(); + } long position = dimensions.positionOf(coords); increment(); return position; diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/sequence/SequentialPositionIterator.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/sequence/SequentialPositionIterator.java index 9f4a9c005c2..01e39c1803c 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/sequence/SequentialPositionIterator.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/impl/sequence/SequentialPositionIterator.java @@ -17,6 +17,7 @@ package org.tensorflow.tools.ndarray.impl.sequence; +import java.util.NoSuchElementException; import org.tensorflow.tools.ndarray.impl.dimension.DimensionalSpace; class SequentialPositionIterator implements PositionIterator { @@ -28,6 +29,9 @@ public boolean hasNext() { @Override public long nextLong() { + if (!hasNext()) { + throw new NoSuchElementException(); + } return stride * index++; } diff --git a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/index/Indices.java b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/index/Indices.java index 1de2e886688..10caa35bde5 100644 --- a/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/index/Indices.java +++ b/tensorflow-tools/src/main/java/org/tensorflow/tools/ndarray/index/Indices.java @@ -89,7 +89,7 @@ public static Index seq(long... 
coords) { if (coords == null) { throw new IllegalArgumentException(); } - return new Sequence(NdArrays.wrap(DataBuffers.from(coords, true, false), Shape.make(coords.length))); + return new Sequence(NdArrays.wrap(DataBuffers.from(coords, true, false), Shape.of(coords.length))); } /** diff --git a/tensorflow-tools/src/test/java/org/tensorflow/tools/ShapeTest.java b/tensorflow-tools/src/test/java/org/tensorflow/tools/ShapeTest.java index 8e8ac0297d2..d2ea5b6ab9f 100644 --- a/tensorflow-tools/src/test/java/org/tensorflow/tools/ShapeTest.java +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/ShapeTest.java @@ -27,7 +27,7 @@ public class ShapeTest { @Test public void allKnownDimensions() { - Shape shape = Shape.make(5, 4, 5); + Shape shape = Shape.of(5, 4, 5); assertEquals(3, shape.numDimensions()); assertEquals(5, shape.size(0)); assertEquals(4, shape.size(1)); @@ -50,10 +50,10 @@ public void allKnownDimensions() { @Test public void hashCodeEquals() { - Shape shape1 = Shape.make(5, 4, 5); - Shape shape2 = Shape.make(5, 4, 5); - Shape shape3 = Shape.make(5, 4, 5, 6); - Shape shape4 = Shape.make(5, 4, 1); + Shape shape1 = Shape.of(5, 4, 5); + Shape shape2 = Shape.of(5, 4, 5); + Shape shape3 = Shape.of(5, 4, 5, 6); + Shape shape4 = Shape.of(5, 4, 1); assertEquals(shape1, shape2); assertEquals(shape1.hashCode(), shape2.hashCode()); @@ -62,13 +62,13 @@ public void hashCodeEquals() { assertNotEquals(shape1, shape4); assertNotEquals(shape1.hashCode(), shape4.hashCode()); - Shape scalar1 = Shape.make(); - Shape scalar2 = Shape.make(); + Shape scalar1 = Shape.of(); + Shape scalar2 = Shape.of(); assertEquals(scalar1, scalar2); assertNotEquals(scalar1, shape1); - Shape unknown1 = Shape.make(-1, 4, 5); - Shape unknown2 = Shape.make(-1, 4, 5); + Shape unknown1 = Shape.of(-1, 4, 5); + Shape unknown2 = Shape.of(-1, 4, 5); assertNotEquals(unknown1, unknown2); assertNotEquals(unknown1, shape1); } diff --git 
a/tensorflow-tools/src/test/java/org/tensorflow/tools/benchmark/NdArrayBenchmark.java b/tensorflow-tools/src/test/java/org/tensorflow/tools/benchmark/NdArrayBenchmark.java index 25214cfdf66..63a2a871120 100644 --- a/tensorflow-tools/src/test/java/org/tensorflow/tools/benchmark/NdArrayBenchmark.java +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/benchmark/NdArrayBenchmark.java @@ -36,6 +36,7 @@ import org.tensorflow.tools.Shape; import org.tensorflow.tools.ndarray.FloatNdArray; import org.tensorflow.tools.ndarray.NdArrays; +import org.tensorflow.tools.ndarray.StdArrays; @Fork(value = 1, jvmArgs = {"-Xms4G", "-Xmx4G"}) @BenchmarkMode(Mode.AverageTime) @@ -44,32 +45,28 @@ @State(Scope.Benchmark) public class NdArrayBenchmark { - static final String TEST_IMAGE = "castle.jpg"; - static final int BATCH_SIZE = 60; - - private FloatNdArray pixels; - private FloatNdArray channels; - private FloatNdArray batches; - private FloatNdArray firstBatch; + public static void main(String[] args) throws IOException, RunnerException { + org.openjdk.jmh.Main.main(args); + } @Setup public void setUp() throws IOException { BufferedImage image = ImageIO.read(getClass().getClassLoader().getResourceAsStream(TEST_IMAGE)); int numPixels = image.getWidth() * image.getHeight(); - pixels = NdArrays.ofFloats(Shape.make(numPixels, 3)); - channels = NdArrays.ofFloats(Shape.make(3, numPixels)); + pixels = NdArrays.ofFloats(Shape.of(numPixels, 3)); + channels = NdArrays.ofFloats(Shape.of(3, numPixels)); Raster imageData = image.getData(); float[] pixel = new float[3]; for (int y = 0, pixelIdx = 0; y < image.getHeight(); ++y) { for (int x = 0; x < image.getWidth(); ++x, ++pixelIdx) { imageData.getPixel(x, y, pixel); - pixels.get(pixelIdx).write(pixel); - channels.slice(all(), at(pixelIdx)).write(pixel); + StdArrays.copyTo(pixels.get(pixelIdx), pixel); + StdArrays.copyTo(channels.slice(all(), at(pixelIdx)), pixel); } } - batches = NdArrays.ofFloats(Shape.make(BATCH_SIZE, 3, numPixels)); + 
batches = NdArrays.ofFloats(Shape.of(BATCH_SIZE, 3, numPixels)); firstBatch = batches.get(0); } @@ -140,7 +137,11 @@ public void writeAllPixelsByIndex() { ); } - public static void main(String[] args) throws IOException, RunnerException { - org.openjdk.jmh.Main.main(args); - } + private static final String TEST_IMAGE = "castle.jpg"; + private static final int BATCH_SIZE = 60; + + private FloatNdArray pixels; + private FloatNdArray channels; + private FloatNdArray batches; + private FloatNdArray firstBatch; } diff --git a/tensorflow-tools/src/test/java/org/tensorflow/tools/buffer/DataBufferTestBase.java b/tensorflow-tools/src/test/java/org/tensorflow/tools/buffer/DataBufferTestBase.java index 97574d6b574..e1f44a6799f 100644 --- a/tensorflow-tools/src/test/java/org/tensorflow/tools/buffer/DataBufferTestBase.java +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/buffer/DataBufferTestBase.java @@ -155,4 +155,13 @@ public void copyToBuffer() { // as expected } } + + @Test + public void createFromVarargs() { + DataBuffer buffer = DataBuffers.ofObjects(valueOf(1L), valueOf(2L), valueOf(3L)); + assertEquals(3, buffer.size()); + assertEquals(valueOf(1L), buffer.getObject(0)); + assertEquals(valueOf(2L), buffer.getObject(1)); + assertEquals(valueOf(3L), buffer.getObject(2)); + } } diff --git a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/BooleanNdArrayTestBase.java b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/BooleanNdArrayTestBase.java index c2f64fcb23e..fa81c69312c 100644 --- a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/BooleanNdArrayTestBase.java +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/BooleanNdArrayTestBase.java @@ -18,11 +18,8 @@ import static junit.framework.TestCase.assertTrue; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.fail; import static org.tensorflow.tools.ndarray.NdArrays.vectorOf; -import java.nio.BufferOverflowException; -import 
java.nio.BufferUnderflowException; import org.junit.Test; import org.tensorflow.tools.Shape; @@ -38,100 +35,24 @@ protected Boolean valueOf(Long val) { @Test public void iteratePrimitiveElements() { - BooleanNdArray matrix3d = allocate(Shape.make(5, 4, 5)); + BooleanNdArray matrix3d = allocate(Shape.of(5, 4, 5)); - matrix3d.scalars().forEachIndexed((coords, scalar) -> { - scalar.setBoolean(coords[2] > 0); - }); + matrix3d.scalars().forEachIndexed((coords, scalar) -> + scalar.setBoolean(coords[2] > 0) + ); assertFalse(matrix3d.getBoolean(0, 0, 0)); assertTrue(matrix3d.getBoolean(0, 0, 1)); assertTrue(matrix3d.getBoolean(0, 0, 4)); assertTrue(matrix3d.getBoolean(0, 1, 2)); - matrix3d.elements(1).forEach(vector -> { - vector.set(vectorOf(true, false, true, false, true)); - }); + matrix3d.elements(1).forEach(vector -> + vector.set(vectorOf(true, false, true, false, true)) + ); assertTrue(matrix3d.getBoolean(0, 0, 0)); assertFalse(matrix3d.getBoolean(0, 0, 1)); assertTrue(matrix3d.getBoolean(0, 0, 4)); assertTrue(matrix3d.getBoolean(0, 1, 2)); } - - @Test - public void writeAndReadWithPrimitiveArrays() { - boolean[] values = new boolean[] { true, true, false, false, true, true, false, true, false, false, true, false, true, false, true, true }; - - BooleanNdArray matrix = allocate(Shape.make(3, 4)); - matrix.write(values); - assertTrue(matrix.getBoolean(0, 0)); - assertFalse(matrix.getBoolean(0, 3)); - assertTrue(matrix.getBoolean(1, 0)); - assertFalse(matrix.getBoolean(2, 3)); - - matrix.write(values, 4); - assertTrue(matrix.getBoolean(0, 0)); - assertTrue(matrix.getBoolean(0, 3)); - assertFalse(matrix.getBoolean(1, 0)); - assertTrue(matrix.getBoolean(2, 3)); - - matrix.setBoolean(true, 1, 0); - matrix.read(values, 2); - assertTrue(values[2]); - assertTrue(values[5]); - - matrix.read(values); - assertTrue(values[0]); - assertTrue(values[3]); - - try { - matrix.write(new boolean[] { true, true, true, true }); - fail(); - } catch (BufferUnderflowException e) { - // as 
expected - } - try { - matrix.write(values, values.length); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.write(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(new boolean[4]); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, values.length); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - } } diff --git a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/ByteNdArrayTestBase.java b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/ByteNdArrayTestBase.java index 9fed8019559..542123bef27 100644 --- a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/ByteNdArrayTestBase.java +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/ByteNdArrayTestBase.java @@ -17,10 +17,7 @@ package org.tensorflow.tools.ndarray; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; -import java.nio.BufferOverflowException; -import java.nio.BufferUnderflowException; import org.junit.Test; import org.tensorflow.tools.Shape; @@ -36,107 +33,24 @@ protected Byte valueOf(Long val) { @Test public void iteratePrimitiveElements() { - ByteNdArray matrix3d = allocate(Shape.make(5, 4, 5)); + ByteNdArray matrix3d = allocate(Shape.of(5, 4, 5)); - matrix3d.scalars().forEachIndexed((coords, scalar) -> { - scalar.setByte((byte)coords[2]); - }); + matrix3d.scalars().forEachIndexed((coords, scalar) -> + scalar.setByte((byte)coords[2]) + ); assertEquals(0, matrix3d.getByte(0, 0, 0)); assertEquals(1, 
matrix3d.getByte(0, 0, 1)); assertEquals(4, matrix3d.getByte(0, 0, 4)); assertEquals(2, matrix3d.getByte(0, 1, 2)); - matrix3d.elements(1).forEach(vector -> { - vector.set(NdArrays.vectorOf((byte)5, (byte)6, (byte)7, (byte)8, (byte)9)); - }); + matrix3d.elements(1).forEach(vector -> + vector.set(NdArrays.vectorOf((byte)5, (byte)6, (byte)7, (byte)8, (byte)9)) + ); assertEquals(5, matrix3d.getByte(0, 0, 0)); assertEquals(6, matrix3d.getByte(0, 0, 1)); assertEquals(9, matrix3d.getByte(0, 0, 4)); assertEquals(7, matrix3d.getByte(0, 1, 2)); } - - @Test - public void writeAndReadWithPrimitiveArrays() { - byte[] values = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; - - ByteNdArray matrix = allocate(Shape.make(3, 4)); - matrix.write(values); - assertEquals(0, matrix.getByte(0, 0)); - assertEquals(3, matrix.getByte(0, 3)); - assertEquals(4, matrix.getByte(1, 0)); - assertEquals(11, matrix.getByte(2, 3)); - - matrix.write(values, 4); - assertEquals(4, matrix.getByte(0, 0)); - assertEquals(7, matrix.getByte(0, 3)); - assertEquals(8, matrix.getByte(1, 0)); - assertEquals(15, matrix.getByte(2, 3)); - - matrix.setByte((byte)100, 1, 0); - matrix.read(values, 2); - assertEquals(4, values[2]); - assertEquals(7, values[5]); - assertEquals(100, values[6]); - assertEquals(15, values[13]); - assertEquals(15, values[15]); - - matrix.read(values); - assertEquals(4, values[0]); - assertEquals(7, values[3]); - assertEquals(100, values[4]); - assertEquals(15, values[11]); - assertEquals(15, values[13]); - assertEquals(15, values[15]); - - try { - matrix.write(new byte[] { 1, 2, 3, 4 }); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, values.length); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.write(values, values.length + 1); - fail(); - } catch 
(IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(new byte[4]); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, values.length); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - } } diff --git a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/DoubleNdArrayTestBase.java b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/DoubleNdArrayTestBase.java index 629417bd3e4..c05fc5ab680 100644 --- a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/DoubleNdArrayTestBase.java +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/DoubleNdArrayTestBase.java @@ -17,10 +17,7 @@ package org.tensorflow.tools.ndarray; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; -import java.nio.BufferOverflowException; -import java.nio.BufferUnderflowException; import org.junit.Test; import org.tensorflow.tools.Shape; @@ -36,107 +33,24 @@ protected Double valueOf(Long val) { @Test public void iteratePrimitiveElements() { - DoubleNdArray matrix3d = allocate(Shape.make(5, 4, 5)); + DoubleNdArray matrix3d = allocate(Shape.of(5, 4, 5)); - matrix3d.scalars().forEachIndexed((coords, scalar) -> { - scalar.setDouble((double)coords[2]); - }); + matrix3d.scalars().forEachIndexed((coords, scalar) -> + scalar.setDouble((double)coords[2]) + ); assertEquals(0.0, matrix3d.getDouble(0, 0, 0), 0.0); assertEquals(1.0, matrix3d.getDouble(0, 0, 1), 0.0); assertEquals(4.0, matrix3d.getDouble(0, 0, 4), 0.0); assertEquals(2.0, matrix3d.getDouble(0, 1, 2), 0.0); - matrix3d.elements(1).forEach(vector -> { - vector.set(NdArrays.vectorOf(5.0, 6.0, 7.0, 8.0, 9.0)); - }); + matrix3d.elements(1).forEach(vector -> + 
vector.set(NdArrays.vectorOf(5.0, 6.0, 7.0, 8.0, 9.0)) + ); assertEquals(5, matrix3d.getDouble(0, 0, 0), 0.0); assertEquals(6, matrix3d.getDouble(0, 0, 1), 0.0); assertEquals(9, matrix3d.getDouble(0, 0, 4), 0.0); assertEquals(7, matrix3d.getDouble(0, 1, 2), 0.0); } - - @Test - public void writeAndReadWithPrimitiveArrays() { - double[] values = new double[] { 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5 }; - - DoubleNdArray matrix = allocate(Shape.make(3, 4)); - matrix.write(values); - assertEquals(0.0, matrix.getDouble(0, 0), 0.0); - assertEquals(0.3, matrix.getDouble(0, 3), 0.0); - assertEquals(0.4, matrix.getDouble(1, 0), 0.0); - assertEquals(1.1, matrix.getDouble(2, 3), 0.0); - - matrix.write(values, 4); - assertEquals(0.4, matrix.getDouble(0, 0), 0.0); - assertEquals(0.7, matrix.getDouble(0, 3), 0.0); - assertEquals(0.8, matrix.getDouble(1, 0), 0.0); - assertEquals(1.5, matrix.getDouble(2, 3), 0.0); - - matrix.setDouble(100.5, 1, 0); - matrix.read(values, 2); - assertEquals(0.4, values[2], 0); - assertEquals(0.7, values[5], 0); - assertEquals(100.5, values[6], 0); - assertEquals(1.5, values[13], 0); - assertEquals(1.5, values[15], 0); - - matrix.read(values); - assertEquals(0.4, values[0], 0); - assertEquals(0.7, values[3], 0); - assertEquals(100.5, values[4], 0); - assertEquals(1.5, values[11], 0); - assertEquals(1.5, values[13], 0); - assertEquals(1.5, values[15], 0); - - try { - matrix.write(new double[] { 0.1, 0.2, 0.3, 0.4 }); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, values.length); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.write(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(new double[4]); - fail(); - } catch 
(BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, values.length); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - } } diff --git a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/FloatNdArrayTestBase.java b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/FloatNdArrayTestBase.java index aadf6102cd7..001be3c9d0a 100644 --- a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/FloatNdArrayTestBase.java +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/FloatNdArrayTestBase.java @@ -17,10 +17,7 @@ package org.tensorflow.tools.ndarray; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; -import java.nio.BufferOverflowException; -import java.nio.BufferUnderflowException; import org.junit.Test; import org.tensorflow.tools.Shape; @@ -36,107 +33,24 @@ protected Float valueOf(Long val) { @Test public void iteratePrimitiveElements() { - FloatNdArray matrix3d = allocate(Shape.make(5, 4, 5)); + FloatNdArray matrix3d = allocate(Shape.of(5, 4, 5)); - matrix3d.scalars().forEachIndexed((coords, scalar) -> { - scalar.setFloat((float)coords[2]); - }); + matrix3d.scalars().forEachIndexed((coords, scalar) -> + scalar.setFloat((float)coords[2]) + ); assertEquals(0.0f, matrix3d.getFloat(0, 0, 0), 0.0f); assertEquals(1.0f, matrix3d.getFloat(0, 0, 1), 0.0f); assertEquals(4.0f, matrix3d.getFloat(0, 0, 4), 0.0f); assertEquals(2.0f, matrix3d.getFloat(0, 1, 2), 0.0f); - matrix3d.elements(1).forEach(vector -> { - vector.set(NdArrays.vectorOf(5.0f, 6.0f, 7.0f, 8.0f, 9.0f)); - }); + matrix3d.elements(1).forEach(vector -> + vector.set(NdArrays.vectorOf(5.0f, 6.0f, 7.0f, 8.0f, 9.0f)) + ); assertEquals(5, matrix3d.getFloat(0, 0, 0), 0.0f); 
assertEquals(6, matrix3d.getFloat(0, 0, 1), 0.0f); assertEquals(9, matrix3d.getFloat(0, 0, 4), 0.0f); assertEquals(7, matrix3d.getFloat(0, 1, 2), 0.0f); } - - @Test - public void writeAndReadWithPrimitiveArrays() { - float[] values = new float[] { 0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f }; - - FloatNdArray matrix = allocate(Shape.make(3, 4)); - matrix.write(values); - assertEquals(0.0f, matrix.getFloat(0, 0), 0.0f); - assertEquals(0.3f, matrix.getFloat(0, 3), 0.0f); - assertEquals(0.4f, matrix.getFloat(1, 0), 0.0f); - assertEquals(1.1f, matrix.getFloat(2, 3), 0.0f); - - matrix.write(values, 4); - assertEquals(0.4f, matrix.getFloat(0, 0), 0.0f); - assertEquals(0.7f, matrix.getFloat(0, 3), 0.0f); - assertEquals(0.8f, matrix.getFloat(1, 0), 0.0f); - assertEquals(1.5f, matrix.getFloat(2, 3), 0.0f); - - matrix.setFloat(100.5f, 1, 0); - matrix.read(values, 2); - assertEquals(0.4f, values[2], 0); - assertEquals(0.7f, values[5], 0); - assertEquals(100.5f, values[6], 0); - assertEquals(1.5f, values[13], 0); - assertEquals(1.5f, values[15], 0); - - matrix.read(values); - assertEquals(0.4f, values[0], 0); - assertEquals(0.7f, values[3], 0); - assertEquals(100.5f, values[4], 0); - assertEquals(1.5f, values[11], 0); - assertEquals(1.5f, values[13], 0); - assertEquals(1.5f, values[15], 0); - - try { - matrix.write(new float[] { 0.1f, 0.2f, 0.3f, 0.4f }); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, values.length); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.write(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(new float[4]); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, values.length); - 
fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - } } diff --git a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/IntNdArrayTestBase.java b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/IntNdArrayTestBase.java index 8aba532e957..f90785151e8 100644 --- a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/IntNdArrayTestBase.java +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/IntNdArrayTestBase.java @@ -17,10 +17,7 @@ package org.tensorflow.tools.ndarray; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; -import java.nio.BufferOverflowException; -import java.nio.BufferUnderflowException; import org.junit.Test; import org.tensorflow.tools.Shape; @@ -36,107 +33,24 @@ protected Integer valueOf(Long val) { @Test public void iteratePrimitiveElements() { - IntNdArray matrix3d = allocate(Shape.make(5, 4, 5)); + IntNdArray matrix3d = allocate(Shape.of(5, 4, 5)); - matrix3d.scalars().forEachIndexed((coords, scalar) -> { - scalar.setInt((int)coords[2]); - }); + matrix3d.scalars().forEachIndexed((coords, scalar) -> + scalar.setInt((int)coords[2]) + ); assertEquals(0, matrix3d.getInt(0, 0, 0)); assertEquals(1, matrix3d.getInt(0, 0, 1)); assertEquals(4, matrix3d.getInt(0, 0, 4)); assertEquals(2, matrix3d.getInt(0, 1, 2)); - matrix3d.elements(1).forEach(vector -> { - vector.set(NdArrays.vectorOf(5, 6, 7, 8, 9)); - }); + matrix3d.elements(1).forEach(vector -> + vector.set(NdArrays.vectorOf(5, 6, 7, 8, 9)) + ); assertEquals(5, matrix3d.getInt(0, 0, 0)); assertEquals(6, matrix3d.getInt(0, 0, 1)); assertEquals(9, matrix3d.getInt(0, 0, 4)); assertEquals(7, matrix3d.getInt(0, 1, 2)); } - - @Test - public void writeAndReadWithPrimitiveArrays() { - int[] 
values = new int[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; - - IntNdArray matrix = allocate(Shape.make(3, 4)); - matrix.write(values); - assertEquals(0, matrix.getInt(0, 0)); - assertEquals(3, matrix.getInt(0, 3)); - assertEquals(4, matrix.getInt(1, 0)); - assertEquals(11, matrix.getInt(2, 3)); - - matrix.write(values, 4); - assertEquals(4, matrix.getInt(0, 0)); - assertEquals(7, matrix.getInt(0, 3)); - assertEquals(8, matrix.getInt(1, 0)); - assertEquals(15, matrix.getInt(2, 3)); - - matrix.setInt(100, 1, 0); - matrix.read(values, 2); - assertEquals(4, values[2]); - assertEquals(7, values[5]); - assertEquals(100, values[6]); - assertEquals(15, values[13]); - assertEquals(15, values[15]); - - matrix.read(values); - assertEquals(4, values[0]); - assertEquals(7, values[3]); - assertEquals(100, values[4]); - assertEquals(15, values[11]); - assertEquals(15, values[13]); - assertEquals(15, values[15]); - - try { - matrix.write(new int[] { 1, 2, 3, 4 }); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, values.length); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.write(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(new int[4]); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, values.length); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - } } diff --git a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/LongNdArrayTestBase.java 
b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/LongNdArrayTestBase.java index b08d361ff11..68e982075f1 100644 --- a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/LongNdArrayTestBase.java +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/LongNdArrayTestBase.java @@ -17,10 +17,7 @@ package org.tensorflow.tools.ndarray; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; -import java.nio.BufferOverflowException; -import java.nio.BufferUnderflowException; import org.junit.Test; import org.tensorflow.tools.Shape; @@ -35,85 +32,25 @@ protected Long valueOf(Long val) { } @Test - public void writeAndReadWithPrimitiveArrays() { - long[] values = new long[] { 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L, 11L, 12L, 13L, 14L, 15L }; - - LongNdArray matrix = allocate(Shape.make(3, 4)); - matrix.write(values); - assertEquals(0L, matrix.getLong(0, 0)); - assertEquals(3L, matrix.getLong(0, 3)); - assertEquals(4L, matrix.getLong(1, 0)); - assertEquals(11L, matrix.getLong(2, 3)); - - matrix.write(values, 4); - assertEquals(4L, matrix.getLong(0, 0)); - assertEquals(7L, matrix.getLong(0, 3)); - assertEquals(8L, matrix.getLong(1, 0)); - assertEquals(15L, matrix.getLong(2, 3)); - - matrix.setLong(100L, 1, 0); - matrix.read(values, 2); - assertEquals(4L, values[2]); - assertEquals(7L, values[5]); - assertEquals(100L, values[6]); - assertEquals(15L, values[13]); - assertEquals(15L, values[15]); - - matrix.read(values); - assertEquals(4L, values[0]); - assertEquals(7L, values[3]); - assertEquals(100L, values[4]); - assertEquals(15L, values[11]); - assertEquals(15L, values[13]); - assertEquals(15L, values[15]); - - try { - matrix.write(new long[] { 1, 2, 3, 4 }); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, values.length); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, -1); - fail(); - } catch 
(IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.write(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(new long[4]); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, values.length); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } + public void iteratePrimitiveElements() { + LongNdArray matrix3d = allocate(Shape.of(5, 4, 5)); + + matrix3d.scalars().forEachIndexed((coords, scalar) -> + scalar.setLong(coords[2]) + ); + + assertEquals(0, matrix3d.getLong(0, 0, 0)); + assertEquals(1, matrix3d.getLong(0, 0, 1)); + assertEquals(4, matrix3d.getLong(0, 0, 4)); + assertEquals(2, matrix3d.getLong(0, 1, 2)); + + matrix3d.elements(1).forEach(vector -> + vector.set(NdArrays.vectorOf(5L, 6L, 7L, 8L, 9L)) + ); + + assertEquals(5, matrix3d.getLong(0, 0, 0)); + assertEquals(6, matrix3d.getLong(0, 0, 1)); + assertEquals(9, matrix3d.getLong(0, 0, 4)); + assertEquals(7, matrix3d.getLong(0, 1, 2)); } } diff --git a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/NdArrayTestBase.java b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/NdArrayTestBase.java index 043c49e4aa8..66d1c98d6af 100644 --- a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/NdArrayTestBase.java +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/NdArrayTestBase.java @@ -31,7 +31,6 @@ import java.nio.BufferOverflowException; import java.nio.BufferUnderflowException; -import java.util.stream.LongStream; import org.junit.Test; import org.tensorflow.tools.Shape; import org.tensorflow.tools.buffer.DataBuffer; @@ -54,9 +53,9 @@ public void shapeAndSizes() { NdArray scalar = 
allocate(scalarShape); assertEquals(scalarShape, scalar.shape()); assertEquals(0, scalar.rank()); - assertEquals(scalarShape, Shape.make()); + assertEquals(scalarShape, Shape.of()); - Shape vectorShape = Shape.make(10); + Shape vectorShape = Shape.of(10); NdArray vector = allocate(vectorShape); assertEquals(vectorShape, vector.shape()); assertEquals(1, vector.rank()); @@ -64,7 +63,7 @@ public void shapeAndSizes() { @Test public void setAndGetValues() { - NdArray matrix = allocate(Shape.make(5, 4)); + NdArray matrix = allocate(Shape.of(5, 4)); assertEquals(zeroOrNull(), matrix.getObject(3, 3)); matrix.setObject(valueOf(10L), 3, 3); @@ -94,7 +93,7 @@ public void setAndGetValues() { // as expected } - NdArray matrix2 = allocate(Shape.make(3, 2)) + NdArray matrix2 = allocate(Shape.of(3, 2)) .set(vectorOfObjects(valueOf(1L), valueOf(2L)), 0) .set(vectorOfObjects(valueOf(3L), valueOf(4L)), 1) .setObject(valueOf(5L), 2, 0) @@ -110,7 +109,7 @@ public void setAndGetValues() { @Test public void iterateElements() { - NdArray matrix3d = allocate(Shape.make(5, 4, 5)); + NdArray matrix3d = allocate(Shape.of(5, 4, 5)); matrix3d.scalars().forEachIndexed((coords, scalar) -> { scalar.setObject(valueOf(coords[2])); @@ -162,7 +161,7 @@ public void iterateElements() { @Test public void slices() { - NdArray matrix3d = allocate(Shape.make(5, 4, 5)); + NdArray matrix3d = allocate(Shape.of(5, 4, 5)); T val100 = valueOf(100L); matrix3d.setObject(val100, 1, 0, 0); @@ -171,7 +170,7 @@ public void slices() { // Vector (1,0,*) NdArray vector10X = matrix3d.get(1, 0); - assertEquals(Shape.make(5), vector10X.shape()); + assertEquals(Shape.of(5), vector10X.shape()); assertEquals(val100, vector10X.getObject(0)); assertEquals(val101, vector10X.getObject(1)); @@ -182,7 +181,7 @@ public void slices() { // Vector (*,0,0) NdArray vectorX00 = matrix3d.slice(all(), at(0), at(0)); - assertEquals(Shape.make(5), vectorX00.shape()); + assertEquals(Shape.of(5), vectorX00.shape()); assertEquals(val100, 
vectorX00.getObject(1)); T val200 = valueOf(200L); vectorX00.setObject(val200, 2); @@ -191,60 +190,60 @@ public void slices() { // Vector (1,0,[2,0]) NdArray vector10_20 = matrix3d.slice(at(1), at(0), seq(2, 0)); - assertEquals(vector10_20.shape(), Shape.make(2)); + assertEquals(vector10_20.shape(), Shape.of(2)); assertEquals(val102, vector10_20.getObject(0)); assertEquals(val100, vector10_20.getObject(1)); // Vector (1,0,[even]) NdArray vector10_even = matrix3d.slice(at(1), at(0), even()); - assertEquals(vector10_even.shape(), Shape.make(3)); + assertEquals(vector10_even.shape(), Shape.of(3)); assertEquals(val100, vector10_even.getObject(0)); assertEquals(val102, vector10_even.getObject(1)); // Vector ([odd]) from vector (1,0,[even]) NdArray vector10_even_odd = vector10_even.slice(odd()); - assertEquals(vector10_even_odd.shape(), Shape.make(1)); + assertEquals(vector10_even_odd.shape(), Shape.of(1)); assertEquals(val102, vector10_even_odd.getObject(0)); // Vector (1,0,[flip]) NdArray vector10_flip = matrix3d.slice(at(1), at(0), flip()); - assertEquals(vector10_flip.shape(), Shape.make(5)); + assertEquals(vector10_flip.shape(), Shape.of(5)); assertEquals(val100, vector10_flip.getObject(4)); assertEquals(val101, vector10_flip.getObject(3)); // Vector (1,0,[from 1]) from vector (1,0,*) NdArray vector10_1toX = vector10X.slice(from(1)); - assertEquals(vector10_1toX.shape(), Shape.make(4)); + assertEquals(vector10_1toX.shape(), Shape.of(4)); assertEquals(val101, vector10_1toX.getObject(0)); assertEquals(val102, vector10_1toX.getObject(1)); // Vector (1,0,[to 1]) from vector (1,0,*) NdArray vector10_Xto1 = vector10X.slice(to(2)); - assertEquals(vector10_Xto1.shape(), Shape.make(2)); + assertEquals(vector10_Xto1.shape(), Shape.of(2)); assertEquals(val100, vector10_Xto1.getObject(0)); assertEquals(val101, vector10_Xto1.getObject(1)); // Vector (1,0,[1 to 3]) NdArray vector10_1to3 = matrix3d.slice(at(1), at(0), range(1, 3)); - assertEquals(vector10_1to3.shape(), 
Shape.make(2)); + assertEquals(vector10_1to3.shape(), Shape.of(2)); assertEquals(val101, vector10_1to3.getObject(0)); assertEquals(val102, vector10_1to3.getObject(1)); // Scalar (1,0,0) from vector (1,0,*) NdArray scalar100 = vector10X.get(0); - assertEquals(Shape.make(), scalar100.shape()); + assertEquals(Shape.of(), scalar100.shape()); assertEquals(val100, scalar100.getObject()); // Slice scalar (1,0,z) LongNdArray z = NdArrays.scalarOf(2L); NdArray scalar102 = matrix3d.slice(at(1), at(0), at(z)); - assertEquals(scalar102.shape(), Shape.make()); + assertEquals(scalar102.shape(), Shape.of()); assertEquals(val102, scalar102.getObject()); // Slicing the 3D matrix so we only keep the first element of the second dimension NdArray matrix_X0Z = matrix3d.slice(all(), at(0)); assertEquals(2, matrix_X0Z.rank()); - assertEquals(Shape.make(5, 5), matrix_X0Z.shape()); + assertEquals(Shape.of(5, 5), matrix_X0Z.shape()); assertEquals(val100, matrix_X0Z.getObject(1, 0)); assertEquals(val101, matrix_X0Z.getObject(1, 1)); assertEquals(val200, matrix_X0Z.getObject(2, 0)); @@ -256,7 +255,7 @@ public void writeAndReadWithBuffers() { for (long val = 0L; val < buffer.size(); ++val) { buffer.setObject(valueOf(val), val); } - NdArray matrix = allocate(Shape.make(3, 5)); + NdArray matrix = allocate(Shape.of(3, 5)); matrix.write(buffer); assertEquals(valueOf(0L), matrix.getObject(0, 0)); assertEquals(valueOf(4L), matrix.getObject(0, 4)); @@ -271,17 +270,30 @@ public void writeAndReadWithBuffers() { assertEquals(valueOf(100L), buffer.getObject(5)); assertEquals(valueOf(10L), buffer.getObject(10)); assertEquals(valueOf(14L), buffer.getObject(14)); + + try { + matrix.write(buffer.narrow(10)); + fail(); + } catch (BufferUnderflowException e) { + // as expected + } + try { + matrix.read(buffer.narrow(10)); + fail(); + } catch (BufferOverflowException e) { + // as expected + } } @Test public void ndArrayCopies() { - NdArray matrixA = allocate(Shape.make(3, 5)); + NdArray matrixA = 
allocate(Shape.of(3, 5)); long value = 0L; for (NdArray s : matrixA.scalars()) { s.setObject(valueOf(value++)); } - NdArray matrixB = allocate(Shape.make(3, 5)).setObject(valueOf(100L), 1, 0); + NdArray matrixB = allocate(Shape.of(3, 5)).setObject(valueOf(100L), 1, 0); matrixA.copyTo(matrixB); assertEquals(valueOf(0L), matrixB.getObject(0, 0)); assertEquals(valueOf(4L), matrixB.getObject(0, 4)); @@ -289,7 +301,7 @@ public void ndArrayCopies() { assertEquals(valueOf(10L), matrixB.getObject(2, 0)); assertEquals(valueOf(14L), matrixB.getObject(2, 4)); - NdArray matrixC = allocate(Shape.make(3, 4)); + NdArray matrixC = allocate(Shape.of(3, 4)); try { matrixA.copyTo(matrixC); fail(); @@ -297,88 +309,4 @@ public void ndArrayCopies() { // as expected } } - - @Test - @SuppressWarnings("unchecked") - public void writeAndReadWithArrays() { - T[] values = (T[])LongStream.range(0L, 16L).boxed().map(this::valueOf).toArray(); - - NdArray matrix = allocate(Shape.make(3, 4)); - matrix.write(values); - assertEquals(valueOf(0L), matrix.getObject(0, 0)); - assertEquals(valueOf(3L), matrix.getObject(0, 3)); - assertEquals(valueOf(4L), matrix.getObject(1, 0)); - assertEquals(valueOf(11L), matrix.getObject(2, 3)); - - matrix.write(values, 4); - assertEquals(valueOf(4L), matrix.getObject(0, 0)); - assertEquals(valueOf(7L), matrix.getObject(0, 3)); - assertEquals(valueOf(8L), matrix.getObject(1, 0)); - assertEquals(valueOf(15L), matrix.getObject(2, 3)); - - matrix.setObject(valueOf(100L), 1, 0); - matrix.read(values, 2); - assertEquals(valueOf(4L), values[2]); - assertEquals(valueOf(7L), values[5]); - assertEquals(valueOf(100L), values[6]); - assertEquals(valueOf(15L), values[13]); - assertEquals(valueOf(15L), values[15]); - - matrix.read(values); - assertEquals(valueOf(4L), values[0]); - assertEquals(valueOf(7L), values[3]); - assertEquals(valueOf(100L), values[4]); - assertEquals(valueOf(15L), values[11]); - assertEquals(valueOf(15L), values[13]); - assertEquals(valueOf(15L), 
values[15]); - - try { - matrix.write((T[])LongStream.range(0L, 4L).boxed().map(this::valueOf).toArray()); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, values.length); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, -1); - fail(); - } catch (IllegalArgumentException e) { - // as expected - } - try { - matrix.write(values, values.length + 1); - fail(); - } catch (IllegalArgumentException e) { - // as expected - } - try { - matrix.read((T[])LongStream.range(0L, 4L).boxed().map(this::valueOf).toArray()); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, values.length); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, -1); - fail(); - } catch (IllegalArgumentException e) { - // as expected - } - try { - matrix.read(values, values.length + 1); - fail(); - } catch (IllegalArgumentException e) { - // as expected - } - } } diff --git a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/ShortNdArrayTestBase.java b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/ShortNdArrayTestBase.java index 7c5ab8661d2..f1504c6ef32 100644 --- a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/ShortNdArrayTestBase.java +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/ShortNdArrayTestBase.java @@ -17,10 +17,7 @@ package org.tensorflow.tools.ndarray; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; -import java.nio.BufferOverflowException; -import java.nio.BufferUnderflowException; import org.junit.Test; import org.tensorflow.tools.Shape; @@ -36,107 +33,24 @@ protected Short valueOf(Long val) { @Test public void iteratePrimitiveElements() { - ShortNdArray matrix3d = allocate(Shape.make(5, 4, 5)); + ShortNdArray matrix3d = allocate(Shape.of(5, 4, 5)); - matrix3d.scalars().forEachIndexed((coords, 
scalar) -> { - scalar.setShort((short)coords[2]); - }); + matrix3d.scalars().forEachIndexed((coords, scalar) -> + scalar.setShort((short)coords[2]) + ); assertEquals(0, matrix3d.getShort(0, 0, 0)); assertEquals(1, matrix3d.getShort(0, 0, 1)); assertEquals(4, matrix3d.getShort(0, 0, 4)); assertEquals(2, matrix3d.getShort(0, 1, 2)); - matrix3d.elements(1).forEach(vector -> { - vector.set(NdArrays.vectorOf((short)5, (short)6, (short)7, (short)8, (short)9)); - }); + matrix3d.elements(1).forEach(vector -> + vector.set(NdArrays.vectorOf((short)5, (short)6, (short)7, (short)8, (short)9)) + ); assertEquals(5, matrix3d.getShort(0, 0, 0)); assertEquals(6, matrix3d.getShort(0, 0, 1)); assertEquals(9, matrix3d.getShort(0, 0, 4)); assertEquals(7, matrix3d.getShort(0, 1, 2)); } - - @Test - public void writeAndReadWithPrimitiveArrays() { - short[] values = new short[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; - - ShortNdArray matrix = allocate(Shape.make(3, 4)); - matrix.write(values); - assertEquals(0, matrix.getShort(0, 0)); - assertEquals(3, matrix.getShort(0, 3)); - assertEquals(4, matrix.getShort(1, 0)); - assertEquals(11, matrix.getShort(2, 3)); - - matrix.write(values, 4); - assertEquals(4, matrix.getShort(0, 0)); - assertEquals(7, matrix.getShort(0, 3)); - assertEquals(8, matrix.getShort(1, 0)); - assertEquals(15, matrix.getShort(2, 3)); - - matrix.setShort((short)100, 1, 0); - matrix.read(values, 2); - assertEquals(4, values[2]); - assertEquals(7, values[5]); - assertEquals(100, values[6]); - assertEquals(15, values[13]); - assertEquals(15, values[15]); - - matrix.read(values); - assertEquals(4, values[0]); - assertEquals(7, values[3]); - assertEquals(100, values[4]); - assertEquals(15, values[11]); - assertEquals(15, values[13]); - assertEquals(15, values[15]); - - try { - matrix.write(new short[] { 1, 2, 3, 4 }); - fail(); - } catch (BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, values.length); - fail(); - } catch 
(BufferUnderflowException e) { - // as expected - } - try { - matrix.write(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.write(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(new short[4]); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, values.length); - fail(); - } catch (BufferOverflowException e) { - // as expected - } - try { - matrix.read(values, -1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - try { - matrix.read(values, values.length + 1); - fail(); - } catch (IndexOutOfBoundsException e) { - // as expected - } - } } diff --git a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/StdArraysTest.java b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/StdArraysTest.java new file mode 100644 index 00000000000..e65660bffd0 --- /dev/null +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/StdArraysTest.java @@ -0,0 +1,117 @@ +package org.tensorflow.tools.ndarray; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import org.junit.Test; +import org.tensorflow.tools.Shape; + +public class StdArraysTest { + + @Test + public void initVector() { + IntNdArray vector = NdArrays.ofInts(Shape.of(2)); + + StdArrays.copyTo(vector, new int[] {1, 2}); + assertEquals(1, vector.getInt(0)); + assertEquals(2, vector.getInt(1)); + + try { + StdArrays.copyTo(vector, new int[] {1, 2, 3}); + fail(); + } catch (IllegalArgumentException e) { + // as expected + } + try { + StdArrays.copyTo(NdArrays.ofInts(Shape.of(4)), new int[] {1, 2}); + fail(); + } catch (IllegalArgumentException e) { + // as expected + } + try { + StdArrays.copyTo(NdArrays.ofInts(Shape.of(2, 2)), new int[] {1, 2}); + fail(); + } catch (IllegalArgumentException e) { + // as expected + 
} + } + + @Test + public void initMatrix() { + IntNdArray matrix = NdArrays.ofInts(Shape.of(2, 2)); + + StdArrays.copyTo(matrix, new int[][] { + {1, 2}, + {3, 4} + }); + assertEquals(1, matrix.getInt(0, 0)); + assertEquals(2, matrix.getInt(0, 1)); + assertEquals(3, matrix.getInt(1, 0)); + assertEquals(4, matrix.getInt(1, 1)); + try { + StdArrays.copyTo(matrix, new int[][] {{1, 2, 3}, {4, 5, 6}}); + fail(); + } catch (IllegalArgumentException e) { + // as expected + } + try { + StdArrays.copyTo(NdArrays.ofInts(Shape.of(3, 3)), new int[][] {{1, 2}, {3, 4}}); + fail(); + } catch (IllegalArgumentException e) { + // as expected + } + try { + StdArrays.copyTo(NdArrays.ofInts(Shape.of(2, 2, 1)), new int[][] {{1, 2}, {3, 4}}); + fail(); + } catch (IllegalArgumentException e) { + // as expected + } + } + + @Test + public void cannotInitDenseMatrixWithRaggedArray() { + IntNdArray matrix = NdArrays.ofInts(Shape.of(2, 2)); + try { + StdArrays.copyTo(matrix, new int[][]{ + {1, 2}, + {3} + }); + fail(); + } catch (IllegalArgumentException e) { + // as expected + } + } + + @Test + public void computeShapeDense3DMatrix() { + Shape shape = StdArrays.shapeOf(new int[][][] { + { + {1, 2, 3}, {4, 5, 6} + }, + { + {1, 2, 3}, {4, 5, 6} + } + }); + assertArrayEquals(new long[] {2, 2, 3}, shape.asArray()); + } + + @Test + public void shapeOfRagged3DMatrix() { + Shape shape = StdArrays.shapeOf(new int[][][] { + { + {1, 2, 3}, {4, 5, 6}, {7, 8, 9} + }, + { + {1, 2, 3}, {4, 5, 6} + } + }); + assertArrayEquals(new long[] {2, Shape.UNKNOWN_SIZE, 3}, shape.asArray()); + } + + @Test + public void shapeOfEmptyArray() { + Shape shape = StdArrays.shapeOf(new int[2][2][3]); + assertArrayEquals(new long[] {2, 2, 3}, shape.asArray()); + } +} diff --git a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/impl/sequence/ElementSequenceTest.java b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/impl/sequence/ElementSequenceTest.java index 5c263462c61..945e41c2f7d 100644 --- 
a/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/impl/sequence/ElementSequenceTest.java +++ b/tensorflow-tools/src/test/java/org/tensorflow/tools/ndarray/impl/sequence/ElementSequenceTest.java @@ -34,7 +34,7 @@ public class ElementSequenceTest { @Test public void iterateVectorsWithIndex() { - IntNdArray array = NdArrays.ofInts(Shape.make(2, 3, 2)); + IntNdArray array = NdArrays.ofInts(Shape.of(2, 3, 2)); @SuppressWarnings("unchecked") NdArraySequence sequence = ElementSequence @@ -53,7 +53,7 @@ public void iterateVectorsWithIndex() { @Test public void iterateScalarsWithIndex() { - IntNdArray array = NdArrays.ofInts(Shape.make(2, 3, 2)); + IntNdArray array = NdArrays.ofInts(Shape.of(2, 3, 2)); @SuppressWarnings("unchecked") NdArraySequence cursor = ElementSequence.create((AbstractNdArray)array, 2);