blob: 5d12d4e362769aee6fcd8edef752dd520ef84877 [file] [log] [blame]
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#![allow(dead_code)]
#![allow(unused_imports)]
use crate::ipc::gen::Schema::*;
use crate::ipc::gen::Tensor::*;
use flatbuffers::EndianScalar;
use std::{cmp::Ordering, mem};
// automatically generated by the FlatBuffers compiler, do not modify
// Deprecated module-level mirrors of the `SparseMatrixCompressedAxis`
// associated constants, kept only for backward compatibility with older
// generated code.
#[deprecated(
    since = "2.0.0",
    note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MIN_SPARSE_MATRIX_COMPRESSED_AXIS: i16 = 0;
#[deprecated(
    since = "2.0.0",
    note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MAX_SPARSE_MATRIX_COMPRESSED_AXIS: i16 = 1;
#[deprecated(
    since = "2.0.0",
    note = "Use associated constants instead. This will no longer be generated in 2021."
)]
#[allow(non_camel_case_types)]
// All known enum values, in declaration order.
pub const ENUM_VALUES_SPARSE_MATRIX_COMPRESSED_AXIS: [SparseMatrixCompressedAxis; 2] = [
    SparseMatrixCompressedAxis::Row,
    SparseMatrixCompressedAxis::Column,
];
/// Flatbuffers enum telling which axis (row or column) of a sparse matrix
/// is compressed. Modeled as a transparent newtype over the wire
/// representation (`i16`) so unknown future values round-trip losslessly.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct SparseMatrixCompressedAxis(pub i16);
#[allow(non_upper_case_globals)]
impl SparseMatrixCompressedAxis {
    pub const Row: Self = Self(0);
    pub const Column: Self = Self(1);
    pub const ENUM_MIN: i16 = 0;
    pub const ENUM_MAX: i16 = 1;
    pub const ENUM_VALUES: &'static [Self] = &[Self::Row, Self::Column];
    /// Returns `Some(name)` for a known variant, or `None` for any
    /// out-of-range value (e.g. data written by a newer schema).
    pub fn variant_name(self) -> Option<&'static str> {
        match self.0 {
            0 => Some("Row"),
            1 => Some("Column"),
            _ => None,
        }
    }
}
impl std::fmt::Debug for SparseMatrixCompressedAxis {
    /// Prints the variant name, or `<UNKNOWN n>` for unrecognized values.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self.variant_name() {
            Some(name) => f.write_str(name),
            None => f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0)),
        }
    }
}
// Reads a `SparseMatrixCompressedAxis` out of a flatbuffer at byte offset `loc`.
impl<'a> flatbuffers::Follow<'a> for SparseMatrixCompressedAxis {
    type Inner = Self;
    #[inline]
    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
        // Stored on the wire as its underlying i16 representation.
        let b = flatbuffers::read_scalar_at::<i16>(buf, loc);
        Self(b)
    }
}
// Serializes the enum value into a buffer being built.
impl flatbuffers::Push for SparseMatrixCompressedAxis {
    type Output = SparseMatrixCompressedAxis;
    #[inline]
    fn push(&self, dst: &mut [u8], _rest: &[u8]) {
        flatbuffers::emplace_scalar::<i16>(dst, self.0);
    }
}
// Byte-order conversions for the i16 payload (flatbuffers wire format is
// little-endian).
impl flatbuffers::EndianScalar for SparseMatrixCompressedAxis {
    #[inline]
    fn to_little_endian(self) -> Self {
        let b = i16::to_le(self.0);
        Self(b)
    }
    #[inline]
    fn from_little_endian(self) -> Self {
        let b = i16::from_le(self.0);
        Self(b)
    }
}
// Verification simply defers to the underlying i16 scalar check; unknown
// enum values are deliberately accepted.
impl<'a> flatbuffers::Verifiable for SparseMatrixCompressedAxis {
    #[inline]
    fn run_verifier(
        v: &mut flatbuffers::Verifier,
        pos: usize,
    ) -> Result<(), flatbuffers::InvalidFlatbuffer> {
        use flatbuffers::Verifiable;
        i16::run_verifier(v, pos)
    }
}
// Marker: vectors of this type can be verified with a plain size check.
impl flatbuffers::SimpleToVerifyInSlice for SparseMatrixCompressedAxis {}
// Deprecated module-level mirrors of the `SparseTensorIndex` associated
// constants, kept only for backward compatibility with older generated code.
#[deprecated(
    since = "2.0.0",
    note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MIN_SPARSE_TENSOR_INDEX: u8 = 0;
#[deprecated(
    since = "2.0.0",
    note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MAX_SPARSE_TENSOR_INDEX: u8 = 3;
#[deprecated(
    since = "2.0.0",
    note = "Use associated constants instead. This will no longer be generated in 2021."
)]
#[allow(non_camel_case_types)]
// All known union discriminants, in declaration order (NONE first).
pub const ENUM_VALUES_SPARSE_TENSOR_INDEX: [SparseTensorIndex; 4] = [
    SparseTensorIndex::NONE,
    SparseTensorIndex::SparseTensorIndexCOO,
    SparseTensorIndex::SparseMatrixIndexCSX,
    SparseTensorIndex::SparseTensorIndexCSF,
];
/// Discriminant of the `SparseTensorIndex` flatbuffers union. Modeled as a
/// transparent newtype over the wire representation (`u8`) so unknown future
/// values round-trip losslessly.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct SparseTensorIndex(pub u8);
#[allow(non_upper_case_globals)]
impl SparseTensorIndex {
    pub const NONE: Self = Self(0);
    pub const SparseTensorIndexCOO: Self = Self(1);
    pub const SparseMatrixIndexCSX: Self = Self(2);
    pub const SparseTensorIndexCSF: Self = Self(3);
    pub const ENUM_MIN: u8 = 0;
    pub const ENUM_MAX: u8 = 3;
    pub const ENUM_VALUES: &'static [Self] = &[
        Self::NONE,
        Self::SparseTensorIndexCOO,
        Self::SparseMatrixIndexCSX,
        Self::SparseTensorIndexCSF,
    ];
    /// Returns `Some(name)` for a known variant, or `None` for any
    /// out-of-range value (e.g. data written by a newer schema).
    pub fn variant_name(self) -> Option<&'static str> {
        match self.0 {
            0 => Some("NONE"),
            1 => Some("SparseTensorIndexCOO"),
            2 => Some("SparseMatrixIndexCSX"),
            3 => Some("SparseTensorIndexCSF"),
            _ => None,
        }
    }
}
impl std::fmt::Debug for SparseTensorIndex {
    /// Prints the variant name, or `<UNKNOWN n>` for unrecognized values.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self.variant_name() {
            Some(name) => f.write_str(name),
            None => f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0)),
        }
    }
}
// Marker type used in generated union-offset signatures.
pub struct SparseTensorIndexUnionTableOffset {}
// Reads a `SparseTensorIndex` discriminant out of a flatbuffer at `loc`.
impl<'a> flatbuffers::Follow<'a> for SparseTensorIndex {
    type Inner = Self;
    #[inline]
    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
        // Stored on the wire as its underlying u8 representation.
        let b = flatbuffers::read_scalar_at::<u8>(buf, loc);
        Self(b)
    }
}
// Serializes the discriminant into a buffer being built.
impl flatbuffers::Push for SparseTensorIndex {
    type Output = SparseTensorIndex;
    #[inline]
    fn push(&self, dst: &mut [u8], _rest: &[u8]) {
        flatbuffers::emplace_scalar::<u8>(dst, self.0);
    }
}
// Byte-order conversions (no-ops for a single byte; kept for trait uniformity).
impl flatbuffers::EndianScalar for SparseTensorIndex {
    #[inline]
    fn to_little_endian(self) -> Self {
        let b = u8::to_le(self.0);
        Self(b)
    }
    #[inline]
    fn from_little_endian(self) -> Self {
        let b = u8::from_le(self.0);
        Self(b)
    }
}
// Verification defers to the underlying u8 scalar check; unknown
// discriminant values are deliberately accepted.
impl<'a> flatbuffers::Verifiable for SparseTensorIndex {
    #[inline]
    fn run_verifier(
        v: &mut flatbuffers::Verifier,
        pos: usize,
    ) -> Result<(), flatbuffers::InvalidFlatbuffer> {
        use flatbuffers::Verifiable;
        u8::run_verifier(v, pos)
    }
}
// Marker: vectors of this type can be verified with a plain size check.
impl flatbuffers::SimpleToVerifyInSlice for SparseTensorIndex {}
// Marker type used in generated offset signatures for this table.
pub enum SparseTensorIndexCOOOffset {}
#[derive(Copy, Clone, PartialEq)]
/// ----------------------------------------------------------------------
/// EXPERIMENTAL: Data structures for sparse tensors
/// Coordinate (COO) format of sparse tensor index.
///
/// COO's index list are represented as a NxM matrix,
/// where N is the number of non-zero values,
/// and M is the number of dimensions of a sparse tensor.
///
/// indicesBuffer stores the location and size of the data of this indices
/// matrix. The value type and the stride of the indices matrix is
/// specified in indicesType and indicesStrides fields.
///
/// For example, let X be a 2x3x4x5 tensor, and it has the following
/// 6 non-zero values:
/// ```text
/// X[0, 1, 2, 0] := 1
/// X[1, 1, 2, 3] := 2
/// X[0, 2, 1, 0] := 3
/// X[0, 1, 3, 0] := 4
/// X[0, 1, 2, 1] := 5
/// X[1, 2, 0, 4] := 6
/// ```
/// In COO format, the index matrix of X is the following 4x6 matrix:
/// ```text
/// [[0, 0, 0, 0, 1, 1],
/// [1, 1, 1, 2, 1, 2],
/// [2, 2, 3, 1, 2, 0],
/// [0, 1, 0, 0, 3, 4]]
/// ```
/// When isCanonical is true, the indices is sorted in lexicographical order
/// (row-major order), and it does not have duplicated entries. Otherwise,
/// the indices may not be sorted, or may have duplicated entries.
pub struct SparseTensorIndexCOO<'a> {
    // Zero-copy view over the underlying flatbuffer table.
    pub _tab: flatbuffers::Table<'a>,
}
// Locates a `SparseTensorIndexCOO` table at byte offset `loc` in `buf`.
impl<'a> flatbuffers::Follow<'a> for SparseTensorIndexCOO<'a> {
    type Inner = SparseTensorIndexCOO<'a>;
    #[inline]
    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
        Self {
            _tab: flatbuffers::Table { buf, loc },
        }
    }
}
impl<'a> SparseTensorIndexCOO<'a> {
    /// Wraps an already-located flatbuffer table as a `SparseTensorIndexCOO`.
    #[inline]
    pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
        SparseTensorIndexCOO { _tab: table }
    }
    /// Serializes a new `SparseTensorIndexCOO` table into `_fbb` from `args`
    /// and returns its in-progress offset.
    #[allow(unused_mut)]
    pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
        _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
        args: &'args SparseTensorIndexCOOArgs<'args>,
    ) -> flatbuffers::WIPOffset<SparseTensorIndexCOO<'bldr>> {
        let mut builder = SparseTensorIndexCOOBuilder::new(_fbb);
        // Fields are added in the order chosen by the generator (presumably
        // by decreasing size/alignment); the defaulted scalar comes last.
        if let Some(x) = args.indicesBuffer {
            builder.add_indicesBuffer(x);
        }
        if let Some(x) = args.indicesStrides {
            builder.add_indicesStrides(x);
        }
        if let Some(x) = args.indicesType {
            builder.add_indicesType(x);
        }
        builder.add_isCanonical(args.isCanonical);
        builder.finish()
    }
    // Vtable offsets for each field of this table.
    pub const VT_INDICESTYPE: flatbuffers::VOffsetT = 4;
    pub const VT_INDICESSTRIDES: flatbuffers::VOffsetT = 6;
    pub const VT_INDICESBUFFER: flatbuffers::VOffsetT = 8;
    pub const VT_ISCANONICAL: flatbuffers::VOffsetT = 10;
    /// The type of values in indicesBuffer
    ///
    /// Required field (the verifier enforces presence); `unwrap` is safe
    /// only for buffers that passed verification.
    #[inline]
    pub fn indicesType(&self) -> Int<'a> {
        self._tab
            .get::<flatbuffers::ForwardsUOffset<Int>>(
                SparseTensorIndexCOO::VT_INDICESTYPE,
                None,
            )
            .unwrap()
    }
    /// Non-negative byte offsets to advance one value cell along each dimension
    /// If omitted, default to row-major order (C-like).
    #[inline]
    pub fn indicesStrides(&self) -> Option<flatbuffers::Vector<'a, i64>> {
        self._tab
            .get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, i64>>>(
                SparseTensorIndexCOO::VT_INDICESSTRIDES,
                None,
            )
    }
    /// The location and size of the indices matrix's data
    ///
    /// Required field; see the note on `indicesType`.
    #[inline]
    pub fn indicesBuffer(&self) -> &'a Buffer {
        self._tab
            .get::<Buffer>(SparseTensorIndexCOO::VT_INDICESBUFFER, None)
            .unwrap()
    }
    /// This flag is true if and only if the indices matrix is sorted in
    /// row-major order, and does not have duplicated entries.
    /// This sort order is the same as of Tensorflow's SparseTensor,
    /// but it is inverse order of SciPy's canonical coo_matrix
    /// (SciPy employs column-major order for its coo_matrix).
    #[inline]
    pub fn isCanonical(&self) -> bool {
        // Scalar with a schema default of false, so `unwrap` cannot fail.
        self._tab
            .get::<bool>(SparseTensorIndexCOO::VT_ISCANONICAL, Some(false))
            .unwrap()
    }
}
impl flatbuffers::Verifiable for SparseTensorIndexCOO<'_> {
    /// Structural verification of the table: checks the table header, then
    /// each field (the boolean argument marks whether the field is required).
    #[inline]
    fn run_verifier(
        v: &mut flatbuffers::Verifier,
        pos: usize,
    ) -> Result<(), flatbuffers::InvalidFlatbuffer> {
        use flatbuffers::Verifiable;
        v.visit_table(pos)?
            .visit_field::<flatbuffers::ForwardsUOffset<Int>>(
                &"indicesType",
                Self::VT_INDICESTYPE,
                true,
            )?
            .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, i64>>>(
                &"indicesStrides",
                Self::VT_INDICESSTRIDES,
                false,
            )?
            .visit_field::<Buffer>(&"indicesBuffer", Self::VT_INDICESBUFFER, true)?
            .visit_field::<bool>(&"isCanonical", Self::VT_ISCANONICAL, false)?
            .finish();
        Ok(())
    }
}
/// Arguments for [`SparseTensorIndexCOO::create`]. Leaving a required field
/// as `None` is rejected by the builder's `finish` step.
pub struct SparseTensorIndexCOOArgs<'a> {
    pub indicesType: Option<flatbuffers::WIPOffset<Int<'a>>>,
    pub indicesStrides: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, i64>>>,
    pub indicesBuffer: Option<&'a Buffer>,
    pub isCanonical: bool,
}
impl<'a> Default for SparseTensorIndexCOOArgs<'a> {
    #[inline]
    fn default() -> Self {
        SparseTensorIndexCOOArgs {
            indicesType: None, // required field
            indicesStrides: None,
            indicesBuffer: None, // required field
            isCanonical: false,
        }
    }
}
/// Incremental builder for a `SparseTensorIndexCOO` table.
pub struct SparseTensorIndexCOOBuilder<'a: 'b, 'b> {
    fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> SparseTensorIndexCOOBuilder<'a, 'b> {
    #[inline]
    pub fn add_indicesType(&mut self, indicesType: flatbuffers::WIPOffset<Int<'b>>) {
        // push_slot_always: offset fields have no scalar default, so they are
        // always written when provided.
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Int>>(
            SparseTensorIndexCOO::VT_INDICESTYPE,
            indicesType,
        );
    }
    #[inline]
    pub fn add_indicesStrides(
        &mut self,
        indicesStrides: flatbuffers::WIPOffset<flatbuffers::Vector<'b, i64>>,
    ) {
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(
            SparseTensorIndexCOO::VT_INDICESSTRIDES,
            indicesStrides,
        );
    }
    #[inline]
    pub fn add_indicesBuffer(&mut self, indicesBuffer: &Buffer) {
        self.fbb_.push_slot_always::<&Buffer>(
            SparseTensorIndexCOO::VT_INDICESBUFFER,
            indicesBuffer,
        );
    }
    #[inline]
    pub fn add_isCanonical(&mut self, isCanonical: bool) {
        // push_slot: the slot is omitted when the value equals the schema
        // default (false).
        self.fbb_.push_slot::<bool>(
            SparseTensorIndexCOO::VT_ISCANONICAL,
            isCanonical,
            false,
        );
    }
    /// Starts a new table in `_fbb` and returns a builder positioned at it.
    #[inline]
    pub fn new(
        _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    ) -> SparseTensorIndexCOOBuilder<'a, 'b> {
        let start = _fbb.start_table();
        SparseTensorIndexCOOBuilder {
            fbb_: _fbb,
            start_: start,
        }
    }
    /// Ends the table, enforcing that the required fields were set.
    #[inline]
    pub fn finish(self) -> flatbuffers::WIPOffset<SparseTensorIndexCOO<'a>> {
        let o = self.fbb_.end_table(self.start_);
        self.fbb_
            .required(o, SparseTensorIndexCOO::VT_INDICESTYPE, "indicesType");
        self.fbb_
            .required(o, SparseTensorIndexCOO::VT_INDICESBUFFER, "indicesBuffer");
        flatbuffers::WIPOffset::new(o.value())
    }
}
impl std::fmt::Debug for SparseTensorIndexCOO<'_> {
    /// Renders each field through its typed accessor, in schema order.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SparseTensorIndexCOO")
            .field("indicesType", &self.indicesType())
            .field("indicesStrides", &self.indicesStrides())
            .field("indicesBuffer", &self.indicesBuffer())
            .field("isCanonical", &self.isCanonical())
            .finish()
    }
}
// Marker type used in generated offset signatures for this table.
pub enum SparseMatrixIndexCSXOffset {}
#[derive(Copy, Clone, PartialEq)]
/// Compressed Sparse format, that is matrix-specific.
pub struct SparseMatrixIndexCSX<'a> {
    // Zero-copy view over the underlying flatbuffer table.
    pub _tab: flatbuffers::Table<'a>,
}
// Locates a `SparseMatrixIndexCSX` table at byte offset `loc` in `buf`.
impl<'a> flatbuffers::Follow<'a> for SparseMatrixIndexCSX<'a> {
    type Inner = SparseMatrixIndexCSX<'a>;
    #[inline]
    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
        Self {
            _tab: flatbuffers::Table { buf, loc },
        }
    }
}
impl<'a> SparseMatrixIndexCSX<'a> {
    /// Wraps an already-located flatbuffer table as a `SparseMatrixIndexCSX`.
    #[inline]
    pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
        SparseMatrixIndexCSX { _tab: table }
    }
    /// Serializes a new `SparseMatrixIndexCSX` table into `_fbb` from `args`
    /// and returns its in-progress offset.
    #[allow(unused_mut)]
    pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
        _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
        args: &'args SparseMatrixIndexCSXArgs<'args>,
    ) -> flatbuffers::WIPOffset<SparseMatrixIndexCSX<'bldr>> {
        let mut builder = SparseMatrixIndexCSXBuilder::new(_fbb);
        if let Some(x) = args.indicesBuffer {
            builder.add_indicesBuffer(x);
        }
        if let Some(x) = args.indicesType {
            builder.add_indicesType(x);
        }
        if let Some(x) = args.indptrBuffer {
            builder.add_indptrBuffer(x);
        }
        if let Some(x) = args.indptrType {
            builder.add_indptrType(x);
        }
        builder.add_compressedAxis(args.compressedAxis);
        builder.finish()
    }
    // Vtable offsets for each field of this table.
    pub const VT_COMPRESSEDAXIS: flatbuffers::VOffsetT = 4;
    pub const VT_INDPTRTYPE: flatbuffers::VOffsetT = 6;
    pub const VT_INDPTRBUFFER: flatbuffers::VOffsetT = 8;
    pub const VT_INDICESTYPE: flatbuffers::VOffsetT = 10;
    pub const VT_INDICESBUFFER: flatbuffers::VOffsetT = 12;
    /// Which axis, row or column, is compressed
    #[inline]
    pub fn compressedAxis(&self) -> SparseMatrixCompressedAxis {
        // Scalar with a schema default of Row, so `unwrap` cannot fail.
        self._tab
            .get::<SparseMatrixCompressedAxis>(
                SparseMatrixIndexCSX::VT_COMPRESSEDAXIS,
                Some(SparseMatrixCompressedAxis::Row),
            )
            .unwrap()
    }
    /// The type of values in indptrBuffer
    ///
    /// Required field (the verifier enforces presence); `unwrap` is safe
    /// only for buffers that passed verification.
    #[inline]
    pub fn indptrType(&self) -> Int<'a> {
        self._tab
            .get::<flatbuffers::ForwardsUOffset<Int>>(
                SparseMatrixIndexCSX::VT_INDPTRTYPE,
                None,
            )
            .unwrap()
    }
    /// indptrBuffer stores the location and size of indptr array that
    /// represents the range of the rows.
    /// The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data.
    /// The length of this array is 1 + (the number of rows), and the type
    /// of index value is long.
    ///
    /// For example, let X be the following 6x4 matrix:
    /// ```text
    /// X := [[0, 1, 2, 0],
    /// [0, 0, 3, 0],
    /// [0, 4, 0, 5],
    /// [0, 0, 0, 0],
    /// [6, 0, 7, 8],
    /// [0, 9, 0, 0]].
    /// ```
    /// The array of non-zero values in X is:
    /// ```text
    /// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9].
    /// ```
    /// And the indptr of X is:
    /// ```text
    /// indptr(X) = [0, 2, 3, 5, 5, 8, 10].
    /// ```
    #[inline]
    pub fn indptrBuffer(&self) -> &'a Buffer {
        // Required struct field; `unwrap` is safe for verified buffers.
        self._tab
            .get::<Buffer>(SparseMatrixIndexCSX::VT_INDPTRBUFFER, None)
            .unwrap()
    }
    /// The type of values in indicesBuffer
    ///
    /// Required field; see the note on `indptrType`.
    #[inline]
    pub fn indicesType(&self) -> Int<'a> {
        self._tab
            .get::<flatbuffers::ForwardsUOffset<Int>>(
                SparseMatrixIndexCSX::VT_INDICESTYPE,
                None,
            )
            .unwrap()
    }
    /// indicesBuffer stores the location and size of the array that
    /// contains the column indices of the corresponding non-zero values.
    /// The type of index value is long.
    ///
    /// For example, the indices of the above X is:
    /// ```text
    /// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1].
    /// ```
    /// Note that the indices are sorted in lexicographical order for each row.
    #[inline]
    pub fn indicesBuffer(&self) -> &'a Buffer {
        // Required struct field; `unwrap` is safe for verified buffers.
        self._tab
            .get::<Buffer>(SparseMatrixIndexCSX::VT_INDICESBUFFER, None)
            .unwrap()
    }
}
impl flatbuffers::Verifiable for SparseMatrixIndexCSX<'_> {
    /// Structural verification of the table: checks the table header, then
    /// each field (the boolean argument marks whether the field is required).
    #[inline]
    fn run_verifier(
        v: &mut flatbuffers::Verifier,
        pos: usize,
    ) -> Result<(), flatbuffers::InvalidFlatbuffer> {
        use flatbuffers::Verifiable;
        v.visit_table(pos)?
            .visit_field::<SparseMatrixCompressedAxis>(
                &"compressedAxis",
                Self::VT_COMPRESSEDAXIS,
                false,
            )?
            .visit_field::<flatbuffers::ForwardsUOffset<Int>>(
                &"indptrType",
                Self::VT_INDPTRTYPE,
                true,
            )?
            .visit_field::<Buffer>(&"indptrBuffer", Self::VT_INDPTRBUFFER, true)?
            .visit_field::<flatbuffers::ForwardsUOffset<Int>>(
                &"indicesType",
                Self::VT_INDICESTYPE,
                true,
            )?
            .visit_field::<Buffer>(&"indicesBuffer", Self::VT_INDICESBUFFER, true)?
            .finish();
        Ok(())
    }
}
/// Arguments for [`SparseMatrixIndexCSX::create`]. Leaving a required field
/// as `None` is rejected by the builder's `finish` step.
pub struct SparseMatrixIndexCSXArgs<'a> {
    pub compressedAxis: SparseMatrixCompressedAxis,
    pub indptrType: Option<flatbuffers::WIPOffset<Int<'a>>>,
    pub indptrBuffer: Option<&'a Buffer>,
    pub indicesType: Option<flatbuffers::WIPOffset<Int<'a>>>,
    pub indicesBuffer: Option<&'a Buffer>,
}
impl<'a> Default for SparseMatrixIndexCSXArgs<'a> {
    #[inline]
    fn default() -> Self {
        SparseMatrixIndexCSXArgs {
            compressedAxis: SparseMatrixCompressedAxis::Row,
            indptrType: None, // required field
            indptrBuffer: None, // required field
            indicesType: None, // required field
            indicesBuffer: None, // required field
        }
    }
}
/// Incremental builder for a `SparseMatrixIndexCSX` table.
pub struct SparseMatrixIndexCSXBuilder<'a: 'b, 'b> {
    fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> SparseMatrixIndexCSXBuilder<'a, 'b> {
    #[inline]
    pub fn add_compressedAxis(&mut self, compressedAxis: SparseMatrixCompressedAxis) {
        // push_slot: the slot is omitted when the value equals the schema
        // default (Row).
        self.fbb_.push_slot::<SparseMatrixCompressedAxis>(
            SparseMatrixIndexCSX::VT_COMPRESSEDAXIS,
            compressedAxis,
            SparseMatrixCompressedAxis::Row,
        );
    }
    #[inline]
    pub fn add_indptrType(&mut self, indptrType: flatbuffers::WIPOffset<Int<'b>>) {
        // push_slot_always: offset/struct fields have no scalar default, so
        // they are always written when provided.
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Int>>(
            SparseMatrixIndexCSX::VT_INDPTRTYPE,
            indptrType,
        );
    }
    #[inline]
    pub fn add_indptrBuffer(&mut self, indptrBuffer: &Buffer) {
        self.fbb_.push_slot_always::<&Buffer>(
            SparseMatrixIndexCSX::VT_INDPTRBUFFER,
            indptrBuffer,
        );
    }
    #[inline]
    pub fn add_indicesType(&mut self, indicesType: flatbuffers::WIPOffset<Int<'b>>) {
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Int>>(
            SparseMatrixIndexCSX::VT_INDICESTYPE,
            indicesType,
        );
    }
    #[inline]
    pub fn add_indicesBuffer(&mut self, indicesBuffer: &Buffer) {
        self.fbb_.push_slot_always::<&Buffer>(
            SparseMatrixIndexCSX::VT_INDICESBUFFER,
            indicesBuffer,
        );
    }
    /// Starts a new table in `_fbb` and returns a builder positioned at it.
    #[inline]
    pub fn new(
        _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    ) -> SparseMatrixIndexCSXBuilder<'a, 'b> {
        let start = _fbb.start_table();
        SparseMatrixIndexCSXBuilder {
            fbb_: _fbb,
            start_: start,
        }
    }
    /// Ends the table, enforcing that the required fields were set.
    #[inline]
    pub fn finish(self) -> flatbuffers::WIPOffset<SparseMatrixIndexCSX<'a>> {
        let o = self.fbb_.end_table(self.start_);
        self.fbb_
            .required(o, SparseMatrixIndexCSX::VT_INDPTRTYPE, "indptrType");
        self.fbb_
            .required(o, SparseMatrixIndexCSX::VT_INDPTRBUFFER, "indptrBuffer");
        self.fbb_
            .required(o, SparseMatrixIndexCSX::VT_INDICESTYPE, "indicesType");
        self.fbb_
            .required(o, SparseMatrixIndexCSX::VT_INDICESBUFFER, "indicesBuffer");
        flatbuffers::WIPOffset::new(o.value())
    }
}
impl std::fmt::Debug for SparseMatrixIndexCSX<'_> {
    /// Renders each field through its typed accessor, in schema order.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SparseMatrixIndexCSX")
            .field("compressedAxis", &self.compressedAxis())
            .field("indptrType", &self.indptrType())
            .field("indptrBuffer", &self.indptrBuffer())
            .field("indicesType", &self.indicesType())
            .field("indicesBuffer", &self.indicesBuffer())
            .finish()
    }
}
// Marker type used in generated offset signatures for this table.
pub enum SparseTensorIndexCSFOffset {}
#[derive(Copy, Clone, PartialEq)]
/// Compressed Sparse Fiber (CSF) sparse tensor index.
pub struct SparseTensorIndexCSF<'a> {
    // Zero-copy view over the underlying flatbuffer table.
    pub _tab: flatbuffers::Table<'a>,
}
// Locates a `SparseTensorIndexCSF` table at byte offset `loc` in `buf`.
impl<'a> flatbuffers::Follow<'a> for SparseTensorIndexCSF<'a> {
    type Inner = SparseTensorIndexCSF<'a>;
    #[inline]
    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
        Self {
            _tab: flatbuffers::Table { buf, loc },
        }
    }
}
impl<'a> SparseTensorIndexCSF<'a> {
    /// Wraps an already-located flatbuffer table as a `SparseTensorIndexCSF`.
    #[inline]
    pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
        SparseTensorIndexCSF { _tab: table }
    }
    /// Serializes a new `SparseTensorIndexCSF` table into `_fbb` from `args`
    /// and returns its in-progress offset.
    #[allow(unused_mut)]
    pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
        _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
        args: &'args SparseTensorIndexCSFArgs<'args>,
    ) -> flatbuffers::WIPOffset<SparseTensorIndexCSF<'bldr>> {
        let mut builder = SparseTensorIndexCSFBuilder::new(_fbb);
        if let Some(x) = args.axisOrder {
            builder.add_axisOrder(x);
        }
        if let Some(x) = args.indicesBuffers {
            builder.add_indicesBuffers(x);
        }
        if let Some(x) = args.indicesType {
            builder.add_indicesType(x);
        }
        if let Some(x) = args.indptrBuffers {
            builder.add_indptrBuffers(x);
        }
        if let Some(x) = args.indptrType {
            builder.add_indptrType(x);
        }
        builder.finish()
    }
    // Vtable offsets for each field of this table.
    pub const VT_INDPTRTYPE: flatbuffers::VOffsetT = 4;
    pub const VT_INDPTRBUFFERS: flatbuffers::VOffsetT = 6;
    pub const VT_INDICESTYPE: flatbuffers::VOffsetT = 8;
    pub const VT_INDICESBUFFERS: flatbuffers::VOffsetT = 10;
    pub const VT_AXISORDER: flatbuffers::VOffsetT = 12;
    /// CSF is a generalization of compressed sparse row (CSR) index.
    /// See [smith2017knl](http://shaden.io/pub-files/smith2017knl.pdf)
    ///
    /// CSF index recursively compresses each dimension of a tensor into a set
    /// of prefix trees. Each path from a root to leaf forms one tensor
    /// non-zero index. CSF is implemented with two arrays of buffers and one
    /// arrays of integers.
    ///
    /// For example, let X be a 2x3x4x5 tensor and let it have the following
    /// 8 non-zero values:
    /// ```text
    /// X[0, 0, 0, 1] := 1
    /// X[0, 0, 0, 2] := 2
    /// X[0, 1, 0, 0] := 3
    /// X[0, 1, 0, 2] := 4
    /// X[0, 1, 1, 0] := 5
    /// X[1, 1, 1, 0] := 6
    /// X[1, 1, 1, 1] := 7
    /// X[1, 1, 1, 2] := 8
    /// ```
    /// As a prefix tree this would be represented as:
    /// ```text
    /// 0 1
    /// / \ |
    /// 0 1 1
    /// / / \ |
    /// 0 0 1 1
    /// /| /| | /| |
    /// 1 2 0 2 0 0 1 2
    /// ```
    /// The type of values in indptrBuffers
    ///
    /// Required field (the verifier enforces presence); `unwrap` is safe
    /// only for buffers that passed verification.
    #[inline]
    pub fn indptrType(&self) -> Int<'a> {
        self._tab
            .get::<flatbuffers::ForwardsUOffset<Int>>(
                SparseTensorIndexCSF::VT_INDPTRTYPE,
                None,
            )
            .unwrap()
    }
    /// indptrBuffers stores the sparsity structure.
    /// Each two consecutive dimensions in a tensor correspond to a buffer in
    /// indptrBuffers. A pair of consecutive values at `indptrBuffers[dim][i]`
    /// and `indptrBuffers[dim][i + 1]` signify a range of nodes in
    /// `indicesBuffers[dim + 1]` who are children of `indicesBuffers[dim][i]` node.
    ///
    /// For example, the indptrBuffers for the above X is:
    /// ```text
    /// indptrBuffer(X) = [
    /// [0, 2, 3],
    /// [0, 1, 3, 4],
    /// [0, 2, 4, 5, 8]
    /// ].
    /// ```
    #[inline]
    pub fn indptrBuffers(&self) -> &'a [Buffer] {
        // Required field; `safe_slice` reinterprets the flatbuffer vector as
        // a native slice of `Buffer` structs.
        self._tab
            .get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, Buffer>>>(
                SparseTensorIndexCSF::VT_INDPTRBUFFERS,
                None,
            )
            .map(|v| v.safe_slice())
            .unwrap()
    }
    /// The type of values in indicesBuffers
    ///
    /// Required field; see the note on `indptrType`.
    #[inline]
    pub fn indicesType(&self) -> Int<'a> {
        self._tab
            .get::<flatbuffers::ForwardsUOffset<Int>>(
                SparseTensorIndexCSF::VT_INDICESTYPE,
                None,
            )
            .unwrap()
    }
    /// indicesBuffers stores values of nodes.
    /// Each tensor dimension corresponds to a buffer in indicesBuffers.
    /// For example, the indicesBuffers for the above X is:
    /// ```text
    /// indicesBuffer(X) = [
    /// [0, 1],
    /// [0, 1, 1],
    /// [0, 0, 1, 1],
    /// [1, 2, 0, 2, 0, 0, 1, 2]
    /// ].
    /// ```
    #[inline]
    pub fn indicesBuffers(&self) -> &'a [Buffer] {
        // Required field; see the note on `indptrBuffers`.
        self._tab
            .get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, Buffer>>>(
                SparseTensorIndexCSF::VT_INDICESBUFFERS,
                None,
            )
            .map(|v| v.safe_slice())
            .unwrap()
    }
    /// axisOrder stores the sequence in which dimensions were traversed to
    /// produce the prefix tree.
    /// For example, the axisOrder for the above X is:
    /// ```text
    /// axisOrder(X) = [0, 1, 2, 3].
    /// ```
    #[inline]
    pub fn axisOrder(&self) -> flatbuffers::Vector<'a, i32> {
        // Required field; `unwrap` is safe for verified buffers.
        self._tab
            .get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, i32>>>(
                SparseTensorIndexCSF::VT_AXISORDER,
                None,
            )
            .unwrap()
    }
}
impl flatbuffers::Verifiable for SparseTensorIndexCSF<'_> {
    /// Structural verification of the table: checks the table header, then
    /// each field (the boolean argument marks whether the field is required;
    /// every field of this table is required).
    #[inline]
    fn run_verifier(
        v: &mut flatbuffers::Verifier,
        pos: usize,
    ) -> Result<(), flatbuffers::InvalidFlatbuffer> {
        use flatbuffers::Verifiable;
        v.visit_table(pos)?
            .visit_field::<flatbuffers::ForwardsUOffset<Int>>(
                &"indptrType",
                Self::VT_INDPTRTYPE,
                true,
            )?
            .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, Buffer>>>(
                &"indptrBuffers",
                Self::VT_INDPTRBUFFERS,
                true,
            )?
            .visit_field::<flatbuffers::ForwardsUOffset<Int>>(
                &"indicesType",
                Self::VT_INDICESTYPE,
                true,
            )?
            .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, Buffer>>>(
                &"indicesBuffers",
                Self::VT_INDICESBUFFERS,
                true,
            )?
            .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, i32>>>(
                &"axisOrder",
                Self::VT_AXISORDER,
                true,
            )?
            .finish();
        Ok(())
    }
}
/// Arguments for [`SparseTensorIndexCSF::create`]. Every field is required;
/// leaving one as `None` is rejected by the builder's `finish` step.
pub struct SparseTensorIndexCSFArgs<'a> {
    pub indptrType: Option<flatbuffers::WIPOffset<Int<'a>>>,
    pub indptrBuffers: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, Buffer>>>,
    pub indicesType: Option<flatbuffers::WIPOffset<Int<'a>>>,
    pub indicesBuffers: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, Buffer>>>,
    pub axisOrder: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, i32>>>,
}
impl<'a> Default for SparseTensorIndexCSFArgs<'a> {
    #[inline]
    fn default() -> Self {
        SparseTensorIndexCSFArgs {
            indptrType: None, // required field
            indptrBuffers: None, // required field
            indicesType: None, // required field
            indicesBuffers: None, // required field
            axisOrder: None, // required field
        }
    }
}
/// Incremental builder for a `SparseTensorIndexCSF` table.
pub struct SparseTensorIndexCSFBuilder<'a: 'b, 'b> {
    fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> SparseTensorIndexCSFBuilder<'a, 'b> {
    #[inline]
    pub fn add_indptrType(&mut self, indptrType: flatbuffers::WIPOffset<Int<'b>>) {
        // push_slot_always: offset fields have no scalar default, so they are
        // always written when provided.
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Int>>(
            SparseTensorIndexCSF::VT_INDPTRTYPE,
            indptrType,
        );
    }
    #[inline]
    pub fn add_indptrBuffers(
        &mut self,
        indptrBuffers: flatbuffers::WIPOffset<flatbuffers::Vector<'b, Buffer>>,
    ) {
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(
            SparseTensorIndexCSF::VT_INDPTRBUFFERS,
            indptrBuffers,
        );
    }
    #[inline]
    pub fn add_indicesType(&mut self, indicesType: flatbuffers::WIPOffset<Int<'b>>) {
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Int>>(
            SparseTensorIndexCSF::VT_INDICESTYPE,
            indicesType,
        );
    }
    #[inline]
    pub fn add_indicesBuffers(
        &mut self,
        indicesBuffers: flatbuffers::WIPOffset<flatbuffers::Vector<'b, Buffer>>,
    ) {
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(
            SparseTensorIndexCSF::VT_INDICESBUFFERS,
            indicesBuffers,
        );
    }
    #[inline]
    pub fn add_axisOrder(
        &mut self,
        axisOrder: flatbuffers::WIPOffset<flatbuffers::Vector<'b, i32>>,
    ) {
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(
            SparseTensorIndexCSF::VT_AXISORDER,
            axisOrder,
        );
    }
    /// Starts a new table in `_fbb` and returns a builder positioned at it.
    #[inline]
    pub fn new(
        _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    ) -> SparseTensorIndexCSFBuilder<'a, 'b> {
        let start = _fbb.start_table();
        SparseTensorIndexCSFBuilder {
            fbb_: _fbb,
            start_: start,
        }
    }
    /// Ends the table, enforcing that every (required) field was set.
    #[inline]
    pub fn finish(self) -> flatbuffers::WIPOffset<SparseTensorIndexCSF<'a>> {
        let o = self.fbb_.end_table(self.start_);
        self.fbb_
            .required(o, SparseTensorIndexCSF::VT_INDPTRTYPE, "indptrType");
        self.fbb_
            .required(o, SparseTensorIndexCSF::VT_INDPTRBUFFERS, "indptrBuffers");
        self.fbb_
            .required(o, SparseTensorIndexCSF::VT_INDICESTYPE, "indicesType");
        self.fbb_
            .required(o, SparseTensorIndexCSF::VT_INDICESBUFFERS, "indicesBuffers");
        self.fbb_
            .required(o, SparseTensorIndexCSF::VT_AXISORDER, "axisOrder");
        flatbuffers::WIPOffset::new(o.value())
    }
}
impl std::fmt::Debug for SparseTensorIndexCSF<'_> {
    /// Renders each field through its typed accessor, in schema order.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SparseTensorIndexCSF")
            .field("indptrType", &self.indptrType())
            .field("indptrBuffers", &self.indptrBuffers())
            .field("indicesType", &self.indicesType())
            .field("indicesBuffers", &self.indicesBuffers())
            .field("axisOrder", &self.axisOrder())
            .finish()
    }
}
// Marker type used in generated offset signatures for this table.
pub enum SparseTensorOffset {}
#[derive(Copy, Clone, PartialEq)]
pub struct SparseTensor<'a> {
    // Zero-copy view over the underlying flatbuffer table.
    pub _tab: flatbuffers::Table<'a>,
}
// Locates a `SparseTensor` table at byte offset `loc` in `buf`.
impl<'a> flatbuffers::Follow<'a> for SparseTensor<'a> {
    type Inner = SparseTensor<'a>;
    #[inline]
    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
        Self {
            _tab: flatbuffers::Table { buf, loc },
        }
    }
}
impl<'a> SparseTensor<'a> {
    /// Wraps an already-located flatbuffer table as a `SparseTensor`.
    #[inline]
    pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
        SparseTensor { _tab: table }
    }
    /// Serializes a new `SparseTensor` table into `_fbb` from `args` and
    /// returns its in-progress offset. Union fields are written as a pair:
    /// the payload offset plus its discriminant (`*_type`).
    #[allow(unused_mut)]
    pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
        _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
        args: &'args SparseTensorArgs<'args>,
    ) -> flatbuffers::WIPOffset<SparseTensor<'bldr>> {
        let mut builder = SparseTensorBuilder::new(_fbb);
        builder.add_non_zero_length(args.non_zero_length);
        if let Some(x) = args.data {
            builder.add_data(x);
        }
        if let Some(x) = args.sparseIndex {
            builder.add_sparseIndex(x);
        }
        if let Some(x) = args.shape {
            builder.add_shape(x);
        }
        if let Some(x) = args.type_ {
            builder.add_type_(x);
        }
        builder.add_sparseIndex_type(args.sparseIndex_type);
        builder.add_type_type(args.type_type);
        builder.finish()
    }
    // Vtable offsets for each field of this table.
    pub const VT_TYPE_TYPE: flatbuffers::VOffsetT = 4;
    pub const VT_TYPE_: flatbuffers::VOffsetT = 6;
    pub const VT_SHAPE: flatbuffers::VOffsetT = 8;
    pub const VT_NON_ZERO_LENGTH: flatbuffers::VOffsetT = 10;
    pub const VT_SPARSEINDEX_TYPE: flatbuffers::VOffsetT = 12;
    pub const VT_SPARSEINDEX: flatbuffers::VOffsetT = 14;
    pub const VT_DATA: flatbuffers::VOffsetT = 16;
    /// Discriminant of the `type` union; `Type::NONE` when unset.
    #[inline]
    pub fn type_type(&self) -> Type {
        self._tab
            .get::<Type>(SparseTensor::VT_TYPE_TYPE, Some(Type::NONE))
            .unwrap()
    }
    /// The type of data contained in a value cell.
    /// Currently only fixed-width value types are supported,
    /// no strings or nested types.
    ///
    /// Raw union payload; use the `type_as_*` helpers for a typed view.
    /// `unwrap` is safe only for buffers that passed verification.
    #[inline]
    pub fn type_(&self) -> flatbuffers::Table<'a> {
        self._tab
            .get::<flatbuffers::ForwardsUOffset<flatbuffers::Table<'a>>>(
                SparseTensor::VT_TYPE_,
                None,
            )
            .unwrap()
    }
    /// The dimensions of the tensor, optionally named.
    #[inline]
    pub fn shape(
        &self,
    ) -> flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<TensorDim<'a>>> {
        self._tab
            .get::<flatbuffers::ForwardsUOffset<
                flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<TensorDim>>,
            >>(SparseTensor::VT_SHAPE, None)
            .unwrap()
    }
    /// The number of non-zero values in a sparse tensor.
    #[inline]
    pub fn non_zero_length(&self) -> i64 {
        // Scalar with a schema default of 0, so `unwrap` cannot fail.
        self._tab
            .get::<i64>(SparseTensor::VT_NON_ZERO_LENGTH, Some(0))
            .unwrap()
    }
    /// Discriminant of the `sparseIndex` union; `NONE` when unset.
    #[inline]
    pub fn sparseIndex_type(&self) -> SparseTensorIndex {
        self._tab
            .get::<SparseTensorIndex>(
                SparseTensor::VT_SPARSEINDEX_TYPE,
                Some(SparseTensorIndex::NONE),
            )
            .unwrap()
    }
    /// Sparse tensor index
    ///
    /// Raw union payload; match on `sparseIndex_type` to interpret it.
    #[inline]
    pub fn sparseIndex(&self) -> flatbuffers::Table<'a> {
        self._tab
            .get::<flatbuffers::ForwardsUOffset<flatbuffers::Table<'a>>>(
                SparseTensor::VT_SPARSEINDEX,
                None,
            )
            .unwrap()
    }
    /// The location and size of the tensor's data
    #[inline]
    pub fn data(&self) -> &'a Buffer {
        self._tab
            .get::<Buffer>(SparseTensor::VT_DATA, None)
            .unwrap()
    }
    // Typed accessors for the `type` union: each returns `Some` only when
    // the union discriminant (`type_type`) matches that variant, downcasting
    // the raw payload table via `init_from_table`.
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_null(&self) -> Option<Null<'a>> {
        if self.type_type() == Type::Null {
            let u = self.type_();
            Some(Null::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_int(&self) -> Option<Int<'a>> {
        if self.type_type() == Type::Int {
            let u = self.type_();
            Some(Int::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_floating_point(&self) -> Option<FloatingPoint<'a>> {
        if self.type_type() == Type::FloatingPoint {
            let u = self.type_();
            Some(FloatingPoint::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_binary(&self) -> Option<Binary<'a>> {
        if self.type_type() == Type::Binary {
            let u = self.type_();
            Some(Binary::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_utf_8(&self) -> Option<Utf8<'a>> {
        if self.type_type() == Type::Utf8 {
            let u = self.type_();
            Some(Utf8::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_bool(&self) -> Option<Bool<'a>> {
        if self.type_type() == Type::Bool {
            let u = self.type_();
            Some(Bool::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_decimal(&self) -> Option<Decimal<'a>> {
        if self.type_type() == Type::Decimal {
            let u = self.type_();
            Some(Decimal::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_date(&self) -> Option<Date<'a>> {
        if self.type_type() == Type::Date {
            let u = self.type_();
            Some(Date::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_time(&self) -> Option<Time<'a>> {
        if self.type_type() == Type::Time {
            let u = self.type_();
            Some(Time::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_timestamp(&self) -> Option<Timestamp<'a>> {
        if self.type_type() == Type::Timestamp {
            let u = self.type_();
            Some(Timestamp::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_interval(&self) -> Option<Interval<'a>> {
        if self.type_type() == Type::Interval {
            let u = self.type_();
            Some(Interval::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_list(&self) -> Option<List<'a>> {
        if self.type_type() == Type::List {
            let u = self.type_();
            Some(List::init_from_table(u))
        } else {
            None
        }
    }
#[inline]
#[allow(non_snake_case)]
pub fn type_as_struct_(&self) -> Option<Struct_<'a>> {
if self.type_type() == Type::Struct_ {
let u = self.type_();
Some(Struct_::init_from_table(u))
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn type_as_union(&self) -> Option<Union<'a>> {
if self.type_type() == Type::Union {
let u = self.type_();
Some(Union::init_from_table(u))
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn type_as_fixed_size_binary(&self) -> Option<FixedSizeBinary<'a>> {
if self.type_type() == Type::FixedSizeBinary {
let u = self.type_();
Some(FixedSizeBinary::init_from_table(u))
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn type_as_fixed_size_list(&self) -> Option<FixedSizeList<'a>> {
if self.type_type() == Type::FixedSizeList {
let u = self.type_();
Some(FixedSizeList::init_from_table(u))
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn type_as_map(&self) -> Option<Map<'a>> {
if self.type_type() == Type::Map {
let u = self.type_();
Some(Map::init_from_table(u))
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn type_as_duration(&self) -> Option<Duration<'a>> {
if self.type_type() == Type::Duration {
let u = self.type_();
Some(Duration::init_from_table(u))
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn type_as_large_binary(&self) -> Option<LargeBinary<'a>> {
if self.type_type() == Type::LargeBinary {
let u = self.type_();
Some(LargeBinary::init_from_table(u))
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn type_as_large_utf_8(&self) -> Option<LargeUtf8<'a>> {
if self.type_type() == Type::LargeUtf8 {
let u = self.type_();
Some(LargeUtf8::init_from_table(u))
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn type_as_large_list(&self) -> Option<LargeList<'a>> {
if self.type_type() == Type::LargeList {
let u = self.type_();
Some(LargeList::init_from_table(u))
} else {
None
}
}
// -----------------------------------------------------------------
// Typed accessors for the `sparseIndex` union. Same discipline as
// the `type_as_*` family: the payload is only resolved when the
// stored discriminant matches the requested index representation
// (COO, CSX, or CSF).
// -----------------------------------------------------------------
#[inline]
#[allow(non_snake_case)]
pub fn sparseIndex_as_sparse_tensor_index_coo(
    &self,
) -> Option<SparseTensorIndexCOO<'a>> {
    if self.sparseIndex_type() == SparseTensorIndex::SparseTensorIndexCOO {
        let u = self.sparseIndex();
        Some(SparseTensorIndexCOO::init_from_table(u))
    } else {
        None
    }
}
#[inline]
#[allow(non_snake_case)]
pub fn sparseIndex_as_sparse_matrix_index_csx(
    &self,
) -> Option<SparseMatrixIndexCSX<'a>> {
    if self.sparseIndex_type() == SparseTensorIndex::SparseMatrixIndexCSX {
        let u = self.sparseIndex();
        Some(SparseMatrixIndexCSX::init_from_table(u))
    } else {
        None
    }
}
#[inline]
#[allow(non_snake_case)]
pub fn sparseIndex_as_sparse_tensor_index_csf(
    &self,
) -> Option<SparseTensorIndexCSF<'a>> {
    if self.sparseIndex_type() == SparseTensorIndex::SparseTensorIndexCSF {
        let u = self.sparseIndex();
        Some(SparseTensorIndexCSF::init_from_table(u))
    } else {
        None
    }
}
}
/// Structural verification for `SparseTensor` buffers: checks the
/// table layout, each required field (`true` in the visit calls),
/// and that each union payload parses as the variant named by its
/// discriminant. Unknown discriminant values are accepted (`_ =>
/// Ok(())`) for forward compatibility.
impl flatbuffers::Verifiable for SparseTensor<'_> {
    #[inline]
    fn run_verifier(
        v: &mut flatbuffers::Verifier,
        pos: usize,
    ) -> Result<(), flatbuffers::InvalidFlatbuffer> {
        use flatbuffers::Verifiable;
        v.visit_table(pos)?
            // `type_` union: discriminant slot + required payload slot.
            .visit_union::<Type, _>(&"type_type", Self::VT_TYPE_TYPE, &"type_", Self::VT_TYPE_, true, |key, v, pos| {
                match key {
                    Type::Null => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Null>>("Type::Null", pos),
                    Type::Int => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Int>>("Type::Int", pos),
                    Type::FloatingPoint => v.verify_union_variant::<flatbuffers::ForwardsUOffset<FloatingPoint>>("Type::FloatingPoint", pos),
                    Type::Binary => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Binary>>("Type::Binary", pos),
                    Type::Utf8 => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Utf8>>("Type::Utf8", pos),
                    Type::Bool => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Bool>>("Type::Bool", pos),
                    Type::Decimal => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Decimal>>("Type::Decimal", pos),
                    Type::Date => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Date>>("Type::Date", pos),
                    Type::Time => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Time>>("Type::Time", pos),
                    Type::Timestamp => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Timestamp>>("Type::Timestamp", pos),
                    Type::Interval => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Interval>>("Type::Interval", pos),
                    Type::List => v.verify_union_variant::<flatbuffers::ForwardsUOffset<List>>("Type::List", pos),
                    Type::Struct_ => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Struct_>>("Type::Struct_", pos),
                    Type::Union => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Union>>("Type::Union", pos),
                    Type::FixedSizeBinary => v.verify_union_variant::<flatbuffers::ForwardsUOffset<FixedSizeBinary>>("Type::FixedSizeBinary", pos),
                    Type::FixedSizeList => v.verify_union_variant::<flatbuffers::ForwardsUOffset<FixedSizeList>>("Type::FixedSizeList", pos),
                    Type::Map => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Map>>("Type::Map", pos),
                    Type::Duration => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Duration>>("Type::Duration", pos),
                    Type::LargeBinary => v.verify_union_variant::<flatbuffers::ForwardsUOffset<LargeBinary>>("Type::LargeBinary", pos),
                    Type::LargeUtf8 => v.verify_union_variant::<flatbuffers::ForwardsUOffset<LargeUtf8>>("Type::LargeUtf8", pos),
                    Type::LargeList => v.verify_union_variant::<flatbuffers::ForwardsUOffset<LargeList>>("Type::LargeList", pos),
                    _ => Ok(()),
                }
            })?
            .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, flatbuffers::ForwardsUOffset<TensorDim>>>>(&"shape", Self::VT_SHAPE, true)?
            .visit_field::<i64>(&"non_zero_length", Self::VT_NON_ZERO_LENGTH, false)?
            // `sparseIndex` union: COO / CSX / CSF representations.
            .visit_union::<SparseTensorIndex, _>(&"sparseIndex_type", Self::VT_SPARSEINDEX_TYPE, &"sparseIndex", Self::VT_SPARSEINDEX, true, |key, v, pos| {
                match key {
                    SparseTensorIndex::SparseTensorIndexCOO => v.verify_union_variant::<flatbuffers::ForwardsUOffset<SparseTensorIndexCOO>>("SparseTensorIndex::SparseTensorIndexCOO", pos),
                    SparseTensorIndex::SparseMatrixIndexCSX => v.verify_union_variant::<flatbuffers::ForwardsUOffset<SparseMatrixIndexCSX>>("SparseTensorIndex::SparseMatrixIndexCSX", pos),
                    SparseTensorIndex::SparseTensorIndexCSF => v.verify_union_variant::<flatbuffers::ForwardsUOffset<SparseTensorIndexCSF>>("SparseTensorIndex::SparseTensorIndexCSF", pos),
                    _ => Ok(()),
                }
            })?
            .visit_field::<Buffer>(&"data", Self::VT_DATA, true)?
            .finish();
        Ok(())
    }
}
/// Argument bundle for constructing a `SparseTensor` table via
/// `SparseTensor::create`-style helpers. Fields marked `Option` but
/// required by the schema are validated at build time in
/// `SparseTensorBuilder::finish`.
pub struct SparseTensorArgs<'a> {
    /// Discriminant for the `type_` union.
    pub type_type: Type,
    /// Union payload for `type_` (required by the schema).
    pub type_: Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>>,
    /// Tensor dimensions (required by the schema).
    pub shape: Option<
        flatbuffers::WIPOffset<
            flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<TensorDim<'a>>>,
        >,
    >,
    /// Number of non-zero values; defaults to 0.
    pub non_zero_length: i64,
    /// Discriminant for the `sparseIndex` union.
    pub sparseIndex_type: SparseTensorIndex,
    /// Union payload for `sparseIndex` (required by the schema).
    pub sparseIndex: Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>>,
    /// Location and size of the tensor data (required by the schema).
    pub data: Option<&'a Buffer>,
}
/// Schema defaults for `SparseTensorArgs`. Note that the `None`
/// values for required fields are placeholders only: leaving them
/// unset will make `SparseTensorBuilder::finish` panic via
/// `required(...)`.
impl<'a> Default for SparseTensorArgs<'a> {
    #[inline]
    fn default() -> Self {
        SparseTensorArgs {
            type_type: Type::NONE,
            type_: None, // required field
            shape: None, // required field
            non_zero_length: 0,
            sparseIndex_type: SparseTensorIndex::NONE,
            sparseIndex: None, // required field
            data: None, // required field
        }
    }
}
/// Incremental builder for a `SparseTensor` table. `'a` is the
/// buffer lifetime, `'b` the (shorter) borrow of the builder.
pub struct SparseTensorBuilder<'a: 'b, 'b> {
    // Underlying flatbuffer builder the fields are pushed into.
    fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    // Offset of the table started in `new`; consumed by `finish`.
    start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> SparseTensorBuilder<'a, 'b> {
    /// Sets the `type_` union discriminant. `push_slot` omits the
    /// slot entirely when the value equals the default `Type::NONE`.
    #[inline]
    pub fn add_type_type(&mut self, type_type: Type) {
        self.fbb_
            .push_slot::<Type>(SparseTensor::VT_TYPE_TYPE, type_type, Type::NONE);
    }
    /// Sets the `type_` union payload. `push_slot_always` is used
    /// because the schema marks the field as required (no default).
    #[inline]
    pub fn add_type_(
        &mut self,
        type_: flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>,
    ) {
        self.fbb_
            .push_slot_always::<flatbuffers::WIPOffset<_>>(SparseTensor::VT_TYPE_, type_);
    }
    /// Sets the required `shape` vector of `TensorDim`s.
    #[inline]
    pub fn add_shape(
        &mut self,
        shape: flatbuffers::WIPOffset<
            flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset<TensorDim<'b>>>,
        >,
    ) {
        self.fbb_
            .push_slot_always::<flatbuffers::WIPOffset<_>>(SparseTensor::VT_SHAPE, shape);
    }
    /// Sets `non_zero_length`; the slot is omitted when it equals
    /// the schema default of 0.
    #[inline]
    pub fn add_non_zero_length(&mut self, non_zero_length: i64) {
        self.fbb_
            .push_slot::<i64>(SparseTensor::VT_NON_ZERO_LENGTH, non_zero_length, 0);
    }
    /// Sets the `sparseIndex` union discriminant (default `NONE`).
    #[inline]
    pub fn add_sparseIndex_type(&mut self, sparseIndex_type: SparseTensorIndex) {
        self.fbb_.push_slot::<SparseTensorIndex>(
            SparseTensor::VT_SPARSEINDEX_TYPE,
            sparseIndex_type,
            SparseTensorIndex::NONE,
        );
    }
    /// Sets the required `sparseIndex` union payload.
    #[inline]
    pub fn add_sparseIndex(
        &mut self,
        sparseIndex: flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>,
    ) {
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(
            SparseTensor::VT_SPARSEINDEX,
            sparseIndex,
        );
    }
    /// Sets the required `data` buffer descriptor (inline struct).
    #[inline]
    pub fn add_data(&mut self, data: &Buffer) {
        self.fbb_
            .push_slot_always::<&Buffer>(SparseTensor::VT_DATA, data);
    }
    /// Starts a new `SparseTensor` table on the given builder.
    #[inline]
    pub fn new(
        _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    ) -> SparseTensorBuilder<'a, 'b> {
        let start = _fbb.start_table();
        SparseTensorBuilder {
            fbb_: _fbb,
            start_: start,
        }
    }
    /// Ends the table and validates that every schema-required field
    /// was set; `required` panics (build-time error) if one is
    /// missing. Returns the finished table's offset.
    #[inline]
    pub fn finish(self) -> flatbuffers::WIPOffset<SparseTensor<'a>> {
        let o = self.fbb_.end_table(self.start_);
        self.fbb_.required(o, SparseTensor::VT_TYPE_, "type_");
        self.fbb_.required(o, SparseTensor::VT_SHAPE, "shape");
        self.fbb_
            .required(o, SparseTensor::VT_SPARSEINDEX, "sparseIndex");
        self.fbb_.required(o, SparseTensor::VT_DATA, "data");
        flatbuffers::WIPOffset::new(o.value())
    }
}
/// Debug formatting for `SparseTensor`. For each union field the
/// discriminant is printed, then the payload is resolved through the
/// matching `*_as_*` accessor; if the accessor disagrees with the
/// discriminant an "InvalidFlatbuffer" marker string is printed
/// instead of panicking.
impl std::fmt::Debug for SparseTensor<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut ds = f.debug_struct("SparseTensor");
        ds.field("type_type", &self.type_type());
        // Resolve and print the `type_` union payload.
        match self.type_type() {
            Type::Null => {
                if let Some(x) = self.type_as_null() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Int => {
                if let Some(x) = self.type_as_int() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::FloatingPoint => {
                if let Some(x) = self.type_as_floating_point() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Binary => {
                if let Some(x) = self.type_as_binary() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Utf8 => {
                if let Some(x) = self.type_as_utf_8() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Bool => {
                if let Some(x) = self.type_as_bool() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Decimal => {
                if let Some(x) = self.type_as_decimal() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Date => {
                if let Some(x) = self.type_as_date() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Time => {
                if let Some(x) = self.type_as_time() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Timestamp => {
                if let Some(x) = self.type_as_timestamp() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Interval => {
                if let Some(x) = self.type_as_interval() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::List => {
                if let Some(x) = self.type_as_list() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Struct_ => {
                if let Some(x) = self.type_as_struct_() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Union => {
                if let Some(x) = self.type_as_union() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::FixedSizeBinary => {
                if let Some(x) = self.type_as_fixed_size_binary() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::FixedSizeList => {
                if let Some(x) = self.type_as_fixed_size_list() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Map => {
                if let Some(x) = self.type_as_map() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::Duration => {
                if let Some(x) = self.type_as_duration() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::LargeBinary => {
                if let Some(x) = self.type_as_large_binary() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::LargeUtf8 => {
                if let Some(x) = self.type_as_large_utf_8() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            Type::LargeList => {
                if let Some(x) = self.type_as_large_list() {
                    ds.field("type_", &x)
                } else {
                    ds.field(
                        "type_",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            // Unknown discriminants (e.g. from a newer schema) print
            // as `None` rather than failing.
            _ => {
                let x: Option<()> = None;
                ds.field("type_", &x)
            }
        };
        ds.field("shape", &self.shape());
        ds.field("non_zero_length", &self.non_zero_length());
        ds.field("sparseIndex_type", &self.sparseIndex_type());
        // Resolve and print the `sparseIndex` union payload.
        match self.sparseIndex_type() {
            SparseTensorIndex::SparseTensorIndexCOO => {
                if let Some(x) = self.sparseIndex_as_sparse_tensor_index_coo() {
                    ds.field("sparseIndex", &x)
                } else {
                    ds.field(
                        "sparseIndex",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            SparseTensorIndex::SparseMatrixIndexCSX => {
                if let Some(x) = self.sparseIndex_as_sparse_matrix_index_csx() {
                    ds.field("sparseIndex", &x)
                } else {
                    ds.field(
                        "sparseIndex",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            SparseTensorIndex::SparseTensorIndexCSF => {
                if let Some(x) = self.sparseIndex_as_sparse_tensor_index_csf() {
                    ds.field("sparseIndex", &x)
                } else {
                    ds.field(
                        "sparseIndex",
                        &"InvalidFlatbuffer: Union discriminant does not match value.",
                    )
                }
            }
            _ => {
                let x: Option<()> = None;
                ds.field("sparseIndex", &x)
            }
        };
        ds.field("data", &self.data());
        ds.finish()
    }
}
/// Legacy unverified root accessor, kept for API compatibility.
/// NOTE(review): this is a *safe* fn that internally performs an
/// unchecked read (`root_unchecked`) — callers must ensure `buf`
/// really contains a `SparseTensor`; prefer `root_as_sparse_tensor`.
#[inline]
#[deprecated(since = "2.0.0", note = "Deprecated in favor of `root_as...` methods.")]
pub fn get_root_as_sparse_tensor<'a>(buf: &'a [u8]) -> SparseTensor<'a> {
    unsafe { flatbuffers::root_unchecked::<SparseTensor<'a>>(buf) }
}
/// Legacy unverified accessor for size-prefixed buffers; same
/// caveat as `get_root_as_sparse_tensor` (safe wrapper around an
/// unchecked read). Prefer `size_prefixed_root_as_sparse_tensor`.
#[inline]
#[deprecated(since = "2.0.0", note = "Deprecated in favor of `root_as...` methods.")]
pub fn get_size_prefixed_root_as_sparse_tensor<'a>(buf: &'a [u8]) -> SparseTensor<'a> {
    unsafe { flatbuffers::size_prefixed_root_unchecked::<SparseTensor<'a>>(buf) }
}
#[inline]
/// Verifies that a buffer of bytes contains a `SparseTensor`
/// and returns it.
/// Note that verification is still experimental and may not
/// catch every error, or be maximally performant. For the
/// previous, unchecked, behavior use
/// `root_as_sparse_tensor_unchecked`.
///
/// # Errors
/// Returns `InvalidFlatbuffer` when verification fails.
pub fn root_as_sparse_tensor(
    buf: &[u8],
) -> Result<SparseTensor, flatbuffers::InvalidFlatbuffer> {
    flatbuffers::root::<SparseTensor>(buf)
}
#[inline]
/// Verifies that a buffer of bytes contains a size prefixed
/// `SparseTensor` and returns it.
/// Note that verification is still experimental and may not
/// catch every error, or be maximally performant. For the
/// previous, unchecked, behavior use
/// `size_prefixed_root_as_sparse_tensor_unchecked`.
///
/// # Errors
/// Returns `InvalidFlatbuffer` when verification fails.
pub fn size_prefixed_root_as_sparse_tensor(
    buf: &[u8],
) -> Result<SparseTensor, flatbuffers::InvalidFlatbuffer> {
    flatbuffers::size_prefixed_root::<SparseTensor>(buf)
}
#[inline]
/// Verifies, with the given options, that a buffer of bytes
/// contains a `SparseTensor` and returns it.
/// Note that verification is still experimental and may not
/// catch every error, or be maximally performant. For the
/// previous, unchecked, behavior use
/// `root_as_sparse_tensor_unchecked`.
///
/// # Errors
/// Returns `InvalidFlatbuffer` when verification fails.
pub fn root_as_sparse_tensor_with_opts<'b, 'o>(
    opts: &'o flatbuffers::VerifierOptions,
    buf: &'b [u8],
) -> Result<SparseTensor<'b>, flatbuffers::InvalidFlatbuffer> {
    flatbuffers::root_with_opts::<SparseTensor<'b>>(opts, buf)
}
#[inline]
/// Verifies, with the given verifier options, that a buffer of
/// bytes contains a size prefixed `SparseTensor` and returns
/// it. Note that verification is still experimental and may not
/// catch every error, or be maximally performant. For the
/// previous, unchecked, behavior use
/// `root_as_sparse_tensor_unchecked`.
///
/// # Errors
/// Returns `InvalidFlatbuffer` when verification fails.
pub fn size_prefixed_root_as_sparse_tensor_with_opts<'b, 'o>(
    opts: &'o flatbuffers::VerifierOptions,
    buf: &'b [u8],
) -> Result<SparseTensor<'b>, flatbuffers::InvalidFlatbuffer> {
    flatbuffers::size_prefixed_root_with_opts::<SparseTensor<'b>>(opts, buf)
}
#[inline]
/// Assumes, without verification, that a buffer of bytes contains a SparseTensor and returns it.
/// # Safety
/// Callers must trust the given bytes do indeed contain a valid `SparseTensor`.
pub unsafe fn root_as_sparse_tensor_unchecked(buf: &[u8]) -> SparseTensor {
    flatbuffers::root_unchecked::<SparseTensor>(buf)
}
#[inline]
/// Assumes, without verification, that a buffer of bytes contains a size prefixed SparseTensor and returns it.
/// # Safety
/// Callers must trust the given bytes do indeed contain a valid size prefixed `SparseTensor`.
pub unsafe fn size_prefixed_root_as_sparse_tensor_unchecked(buf: &[u8]) -> SparseTensor {
    flatbuffers::size_prefixed_root_unchecked::<SparseTensor>(buf)
}
/// Finalizes `fbb` with `root` as the buffer's root table (no file
/// identifier). After this call the builder's bytes form a complete
/// `SparseTensor` flatbuffer.
#[inline]
pub fn finish_sparse_tensor_buffer<'a, 'b>(
    fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    root: flatbuffers::WIPOffset<SparseTensor<'a>>,
) {
    fbb.finish(root, None);
}
/// Like `finish_sparse_tensor_buffer`, but writes a size prefix
/// before the root table (for length-delimited streams).
#[inline]
pub fn finish_size_prefixed_sparse_tensor_buffer<'a, 'b>(
    fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    root: flatbuffers::WIPOffset<SparseTensor<'a>>,
) {
    fbb.finish_size_prefixed(root, None);
}