Add a new type `tensor_t` in prelude that represents a tensor with variable rank. A tensor array is a list of `tensor_t`.
````OCaml
type tensor_t =
| tensor0 of TensorType([])
| tensor1 of TensorType([Any()])
| tensor2 of TensorType([Any(), Any()])
| tensor3 of TensorType([Any(), Any(), Any()])
| tensor4 of TensorType([Any(), Any(), Any(), Any()])
| tensor5 of TensorType([Any(), Any(), Any(), Any(), Any()])
| tensor6 of TensorType([Any(), Any(), Any(), Any(), Any(), Any()])
type tensor_array = list tensor_t
// build a tensor array with size n
let tensor_array n =
match n with
| 0 -> nil()
| x -> cons(tensor_nil(), tensor_array(x-1))
// read nth element from ta
let tensor_array_read ta n = nth ta n
// write v to nth position of ta
let tensor_array_write ta n v = update ta n v
// concatenate two tensor_t
let tensor_concatenate a b =
match (a,b) with
| tensor1(t1), tensor1(t2) -> tensor1(op.concat(t1, t2))
| tensor2(t1), tensor2(t2) -> tensor2(op.concat(t1, t2))
| tensor3(t1), tensor3(t2) -> tensor3(op.concat(t1, t2))
| tensor4(t1), tensor4(t2) -> tensor4(op.concat(t1, t2))
// grow the tensor rank by 1
let tensor_add_one t =
match t with
| tensor0(tt) -> tensor1(expand_dims(tt))
| tensor1(tt) -> tensor2(expand_dims(tt))
| tensor2(tt) -> tensor3(expand_dims(tt))
| tensor3(tt) -> tensor4(expand_dims(tt))
| tensor4(tt) -> tensor5(expand_dims(tt))
| tensor5(tt) -> tensor6(expand_dims(tt))
// return the values in tensor array as stacked tensor
let tensor_array_stack ta =
let tensors_add_one = map(tensor_add_one, ta) in
foldl(tensor_concatenate, hd(tensors_add_one), tl(tensors_add_one))
// return tensor array size
let tensor_array_size ta = length ta
// (tensor_array -> TensorType([Any()]) -> tensor_array -> tensor_array)
let tensor_array_scatter ta indices values =
let helper ta current limit indices values =
if (current == limit) {
ta
} else {
helper(tensor_array_write(ta, op.take(indices, current),tensor_array_read(values, current)),
current+1, limit, indices, values)
}
in
let indices_shape = op.shape_of(indices) in
let limit = op.take(indices_shape, 0) in
helper(ta, 0, limit, indices, values)
let tensor_array_gather ta indices =
let helper ta accu current limit indices =
if (current == 0) {
tensor_array_stack(accu)
} else {
helper(ta, cons(tensor_array_read(ta, op.take(indices, current-1)), accu), current-1, limit, indices)
}
in
let indices_shape = op.shape_of(indices) in
let limit = op.take(indices_shape, 0) in
helper(ta, nil(), limit, limit, indices)
let tensor_array_split ta value lengths =
let helper ta1 value1 offset1 current1 limit1 lengths1 =
if (current1 == limit1) {
ta1
} else {
tensor_array_write(helper(ta1, value1, offset1 + op.take(lengths1, current1), current1+1, limit1, lengths1), current1, tensor_take(value1, offset1, offset1 + op.take(lengths1, current1)))
}
in
let lengths_shape = op.shape_of(lengths) in
let lengths_limit = op.take(lengths_shape, 0) in
helper(ta, value, 0, 0, lengths_limit, lengths)
let tensor_array_concat ta =
match ta with
| nil() -> tensor_nil()
| cons(hd, nil()) -> hd
| cons(hd, cons as tl) -> tensor_concatenate(hd, tensor_array_concat(tl))
````
This PR depends on https://github.com/dmlc/tvm/pull/3606.
Todo:
- [x] constructor
- [x] read
- [x] write
- [x] stack
- [x] unstack
- [x] size
- [x] scatter
- [x] gather
- [x] split
- [x] concat
- [x] more tests