// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#![allow(dead_code)]
#![allow(unused_imports)]
use crate::ipc::gen::Schema::*;
use crate::ipc::gen::SparseTensor::*;
use crate::ipc::gen::Tensor::*;
use flatbuffers::EndianScalar;
use std::{cmp::Ordering, mem};
// automatically generated by the FlatBuffers compiler, do not modify
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MIN_COMPRESSION_TYPE: i8 = 0;
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MAX_COMPRESSION_TYPE: i8 = 1;
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
#[allow(non_camel_case_types)]
pub const ENUM_VALUES_COMPRESSION_TYPE: [CompressionType; 2] =
[CompressionType::LZ4_FRAME, CompressionType::ZSTD];
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct CompressionType(pub i8);
#[allow(non_upper_case_globals)]
impl CompressionType {
pub const LZ4_FRAME: Self = Self(0);
pub const ZSTD: Self = Self(1);
pub const ENUM_MIN: i8 = 0;
pub const ENUM_MAX: i8 = 1;
pub const ENUM_VALUES: &'static [Self] = &[Self::LZ4_FRAME, Self::ZSTD];
/// Returns the variant's name, or `None` if unknown.
pub fn variant_name(self) -> Option<&'static str> {
match self {
Self::LZ4_FRAME => Some("LZ4_FRAME"),
Self::ZSTD => Some("ZSTD"),
_ => None,
}
}
}
impl std::fmt::Debug for CompressionType {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(name) = self.variant_name() {
f.write_str(name)
} else {
f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0))
}
}
}
impl<'a> flatbuffers::Follow<'a> for CompressionType {
type Inner = Self;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
let b = flatbuffers::read_scalar_at::<i8>(buf, loc);
Self(b)
}
}
impl flatbuffers::Push for CompressionType {
type Output = CompressionType;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
flatbuffers::emplace_scalar::<i8>(dst, self.0);
}
}
impl flatbuffers::EndianScalar for CompressionType {
#[inline]
fn to_little_endian(self) -> Self {
let b = i8::to_le(self.0);
Self(b)
}
#[inline]
fn from_little_endian(self) -> Self {
let b = i8::from_le(self.0);
Self(b)
}
}
impl<'a> flatbuffers::Verifiable for CompressionType {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
i8::run_verifier(v, pos)
}
}
impl flatbuffers::SimpleToVerifyInSlice for CompressionType {}
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MIN_BODY_COMPRESSION_METHOD: i8 = 0;
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MAX_BODY_COMPRESSION_METHOD: i8 = 0;
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
#[allow(non_camel_case_types)]
pub const ENUM_VALUES_BODY_COMPRESSION_METHOD: [BodyCompressionMethod; 1] =
[BodyCompressionMethod::BUFFER];
/// Provided for forward compatibility in case we need to support different
/// strategies for compressing the IPC message body (like whole-body
/// compression rather than buffer-level) in the future
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct BodyCompressionMethod(pub i8);
#[allow(non_upper_case_globals)]
impl BodyCompressionMethod {
/// Each constituent buffer is first compressed with the indicated
/// compressor, and then written with the uncompressed length in the first 8
/// bytes as a 64-bit little-endian signed integer followed by the compressed
/// buffer bytes (and then padding as required by the protocol). The
/// uncompressed length may be set to -1 to indicate that the data that
/// follows is not compressed, which can be useful for cases where
/// compression does not yield appreciable savings.
pub const BUFFER: Self = Self(0);
pub const ENUM_MIN: i8 = 0;
pub const ENUM_MAX: i8 = 0;
pub const ENUM_VALUES: &'static [Self] = &[Self::BUFFER];
/// Returns the variant's name, or `None` if unknown.
pub fn variant_name(self) -> Option<&'static str> {
match self {
Self::BUFFER => Some("BUFFER"),
_ => None,
}
}
}
impl std::fmt::Debug for BodyCompressionMethod {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(name) = self.variant_name() {
f.write_str(name)
} else {
f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0))
}
}
}
impl<'a> flatbuffers::Follow<'a> for BodyCompressionMethod {
type Inner = Self;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
let b = flatbuffers::read_scalar_at::<i8>(buf, loc);
Self(b)
}
}
impl flatbuffers::Push for BodyCompressionMethod {
type Output = BodyCompressionMethod;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
flatbuffers::emplace_scalar::<i8>(dst, self.0);
}
}
impl flatbuffers::EndianScalar for BodyCompressionMethod {
#[inline]
fn to_little_endian(self) -> Self {
let b = i8::to_le(self.0);
Self(b)
}
#[inline]
fn from_little_endian(self) -> Self {
let b = i8::from_le(self.0);
Self(b)
}
}
impl<'a> flatbuffers::Verifiable for BodyCompressionMethod {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
i8::run_verifier(v, pos)
}
}
impl flatbuffers::SimpleToVerifyInSlice for BodyCompressionMethod {}
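// Illustrative sketch (not emitted by the FlatBuffers compiler): frames one
// body buffer per the layout documented on `BodyCompressionMethod::BUFFER`
// above. The compressed payload is assumed to have been produced elsewhere
// with the codec recorded in `BodyCompression::codec`; this helper only
// prepends the 64-bit little-endian uncompressed length, or -1 when the
// payload is stored uncompressed because compression gave no benefit.
fn frame_body_buffer(uncompressed_len: Option<i64>, payload: &[u8]) -> Vec<u8> {
    let mut framed = Vec::with_capacity(8 + payload.len());
    // -1 signals that `payload` is the raw, uncompressed data.
    framed.extend_from_slice(&uncompressed_len.unwrap_or(-1).to_le_bytes());
    framed.extend_from_slice(payload);
    // Any padding required by the IPC protocol is applied by the writer when
    // it lays out the message body; it is not added here.
    framed
}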
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MIN_MESSAGE_HEADER: u8 = 0;
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
pub const ENUM_MAX_MESSAGE_HEADER: u8 = 5;
#[deprecated(
since = "2.0.0",
note = "Use associated constants instead. This will no longer be generated in 2021."
)]
#[allow(non_camel_case_types)]
pub const ENUM_VALUES_MESSAGE_HEADER: [MessageHeader; 6] = [
MessageHeader::NONE,
MessageHeader::Schema,
MessageHeader::DictionaryBatch,
MessageHeader::RecordBatch,
MessageHeader::Tensor,
MessageHeader::SparseTensor,
];
/// ----------------------------------------------------------------------
/// The root Message type
/// This union enables us to easily send different message types without
/// redundant storage, and in the future we can easily add new message types.
///
/// Arrow implementations do not need to implement all of the message types,
/// which may include experimental metadata types. For maximum compatibility,
/// it is best to send data using RecordBatch
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct MessageHeader(pub u8);
#[allow(non_upper_case_globals)]
impl MessageHeader {
pub const NONE: Self = Self(0);
pub const Schema: Self = Self(1);
pub const DictionaryBatch: Self = Self(2);
pub const RecordBatch: Self = Self(3);
pub const Tensor: Self = Self(4);
pub const SparseTensor: Self = Self(5);
pub const ENUM_MIN: u8 = 0;
pub const ENUM_MAX: u8 = 5;
pub const ENUM_VALUES: &'static [Self] = &[
Self::NONE,
Self::Schema,
Self::DictionaryBatch,
Self::RecordBatch,
Self::Tensor,
Self::SparseTensor,
];
/// Returns the variant's name, or `None` if unknown.
pub fn variant_name(self) -> Option<&'static str> {
match self {
Self::NONE => Some("NONE"),
Self::Schema => Some("Schema"),
Self::DictionaryBatch => Some("DictionaryBatch"),
Self::RecordBatch => Some("RecordBatch"),
Self::Tensor => Some("Tensor"),
Self::SparseTensor => Some("SparseTensor"),
_ => None,
}
}
}
impl std::fmt::Debug for MessageHeader {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(name) = self.variant_name() {
f.write_str(name)
} else {
f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0))
}
}
}
pub struct MessageHeaderUnionTableOffset {}
impl<'a> flatbuffers::Follow<'a> for MessageHeader {
type Inner = Self;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
let b = flatbuffers::read_scalar_at::<u8>(buf, loc);
Self(b)
}
}
impl flatbuffers::Push for MessageHeader {
type Output = MessageHeader;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
flatbuffers::emplace_scalar::<u8>(dst, self.0);
}
}
impl flatbuffers::EndianScalar for MessageHeader {
#[inline]
fn to_little_endian(self) -> Self {
let b = u8::to_le(self.0);
Self(b)
}
#[inline]
fn from_little_endian(self) -> Self {
let b = u8::from_le(self.0);
Self(b)
}
}
impl<'a> flatbuffers::Verifiable for MessageHeader {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
u8::run_verifier(v, pos)
}
}
impl flatbuffers::SimpleToVerifyInSlice for MessageHeader {}
/// ----------------------------------------------------------------------
/// Data structures for describing a table row batch (a collection of
/// equal-length Arrow arrays)
/// Metadata about a field at some level of a nested type tree (but not
/// its children).
///
/// For example, a List<Int16> with values `[[1, 2, 3], null, [4], [5, 6], null]`
/// would have {length: 5, null_count: 2} for its List node, and {length: 6,
/// null_count: 0} for its Int16 node, as separate FieldNode structs
// struct FieldNode, aligned to 8
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq)]
pub struct FieldNode(pub [u8; 16]);
impl std::fmt::Debug for FieldNode {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("FieldNode")
.field("length", &self.length())
.field("null_count", &self.null_count())
.finish()
}
}
impl flatbuffers::SimpleToVerifyInSlice for FieldNode {}
impl flatbuffers::SafeSliceAccess for FieldNode {}
impl<'a> flatbuffers::Follow<'a> for FieldNode {
type Inner = &'a FieldNode;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
<&'a FieldNode>::follow(buf, loc)
}
}
impl<'a> flatbuffers::Follow<'a> for &'a FieldNode {
type Inner = &'a FieldNode;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
flatbuffers::follow_cast_ref::<FieldNode>(buf, loc)
}
}
impl<'b> flatbuffers::Push for FieldNode {
type Output = FieldNode;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
let src = unsafe {
::std::slice::from_raw_parts(
self as *const FieldNode as *const u8,
Self::size(),
)
};
dst.copy_from_slice(src);
}
}
impl<'b> flatbuffers::Push for &'b FieldNode {
type Output = FieldNode;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
let src = unsafe {
::std::slice::from_raw_parts(
*self as *const FieldNode as *const u8,
Self::size(),
)
};
dst.copy_from_slice(src);
}
}
impl<'a> flatbuffers::Verifiable for FieldNode {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
v.in_buffer::<Self>(pos)
}
}
impl FieldNode {
#[allow(clippy::too_many_arguments)]
pub fn new(length: i64, null_count: i64) -> Self {
let mut s = Self([0; 16]);
s.set_length(length);
s.set_null_count(null_count);
s
}
/// The number of value slots in the Arrow array at this level of a nested
/// tree
pub fn length(&self) -> i64 {
let mut mem = core::mem::MaybeUninit::<i64>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[0..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<i64>(),
);
mem.assume_init()
}
.from_little_endian()
}
pub fn set_length(&mut self, x: i64) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const i64 as *const u8,
self.0[0..].as_mut_ptr(),
core::mem::size_of::<i64>(),
);
}
}
/// The number of observed nulls. Fields with null_count == 0 may choose not
/// to write their physical validity bitmap out as a materialized buffer,
/// instead setting the length of the bitmap buffer to 0.
pub fn null_count(&self) -> i64 {
let mut mem = core::mem::MaybeUninit::<i64>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[8..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<i64>(),
);
mem.assume_init()
}
.from_little_endian()
}
pub fn set_null_count(&mut self, x: i64) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const i64 as *const u8,
self.0[8..].as_mut_ptr(),
core::mem::size_of::<i64>(),
);
}
}
}
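// Illustrative sketch (not emitted by the FlatBuffers compiler): the two
// FieldNode values for the `List<Int16>` example in the doc comment above,
// `[[1, 2, 3], null, [4], [5, 6], null]`.
fn example_field_nodes_for_list_of_int16() -> [FieldNode; 2] {
    [
        // Outer List node: 5 value slots, 2 of them null.
        FieldNode::new(5, 2),
        // Child Int16 node: 6 values, none null.
        FieldNode::new(6, 0),
    ]
}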
pub enum BodyCompressionOffset {}
#[derive(Copy, Clone, PartialEq)]
/// Optional compression for the memory buffers constituting IPC message
/// bodies. Intended for use with RecordBatch but could be used for other
/// message types
pub struct BodyCompression<'a> {
pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for BodyCompression<'a> {
type Inner = BodyCompression<'a>;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self {
_tab: flatbuffers::Table { buf, loc },
}
}
}
impl<'a> BodyCompression<'a> {
#[inline]
pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
BodyCompression { _tab: table }
}
#[allow(unused_mut)]
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
args: &'args BodyCompressionArgs,
) -> flatbuffers::WIPOffset<BodyCompression<'bldr>> {
let mut builder = BodyCompressionBuilder::new(_fbb);
builder.add_method(args.method);
builder.add_codec(args.codec);
builder.finish()
}
pub const VT_CODEC: flatbuffers::VOffsetT = 4;
pub const VT_METHOD: flatbuffers::VOffsetT = 6;
/// Compressor library
#[inline]
pub fn codec(&self) -> CompressionType {
self._tab
.get::<CompressionType>(
BodyCompression::VT_CODEC,
Some(CompressionType::LZ4_FRAME),
)
.unwrap()
}
/// Indicates the way the record batch body was compressed
#[inline]
pub fn method(&self) -> BodyCompressionMethod {
self._tab
.get::<BodyCompressionMethod>(
BodyCompression::VT_METHOD,
Some(BodyCompressionMethod::BUFFER),
)
.unwrap()
}
}
impl flatbuffers::Verifiable for BodyCompression<'_> {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<CompressionType>(&"codec", Self::VT_CODEC, false)?
.visit_field::<BodyCompressionMethod>(&"method", Self::VT_METHOD, false)?
.finish();
Ok(())
}
}
pub struct BodyCompressionArgs {
pub codec: CompressionType,
pub method: BodyCompressionMethod,
}
impl<'a> Default for BodyCompressionArgs {
#[inline]
fn default() -> Self {
BodyCompressionArgs {
codec: CompressionType::LZ4_FRAME,
method: BodyCompressionMethod::BUFFER,
}
}
}
pub struct BodyCompressionBuilder<'a: 'b, 'b> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> BodyCompressionBuilder<'a, 'b> {
#[inline]
pub fn add_codec(&mut self, codec: CompressionType) {
self.fbb_.push_slot::<CompressionType>(
BodyCompression::VT_CODEC,
codec,
CompressionType::LZ4_FRAME,
);
}
#[inline]
pub fn add_method(&mut self, method: BodyCompressionMethod) {
self.fbb_.push_slot::<BodyCompressionMethod>(
BodyCompression::VT_METHOD,
method,
BodyCompressionMethod::BUFFER,
);
}
#[inline]
pub fn new(
_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
) -> BodyCompressionBuilder<'a, 'b> {
let start = _fbb.start_table();
BodyCompressionBuilder {
fbb_: _fbb,
start_: start,
}
}
#[inline]
pub fn finish(self) -> flatbuffers::WIPOffset<BodyCompression<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}
impl std::fmt::Debug for BodyCompression<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut ds = f.debug_struct("BodyCompression");
ds.field("codec", &self.codec());
ds.field("method", &self.method());
ds.finish()
}
}
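// Illustrative sketch (not emitted by the FlatBuffers compiler): records
// ZSTD, buffer-level compression while building a message. The resulting
// offset would typically be passed as `RecordBatchArgs::compression`.
fn example_body_compression<'a>(
    fbb: &mut flatbuffers::FlatBufferBuilder<'a>,
) -> flatbuffers::WIPOffset<BodyCompression<'a>> {
    BodyCompression::create(
        fbb,
        &BodyCompressionArgs {
            codec: CompressionType::ZSTD,
            method: BodyCompressionMethod::BUFFER,
        },
    )
}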
pub enum RecordBatchOffset {}
#[derive(Copy, Clone, PartialEq)]
/// A data header describing the shared memory layout of a "record" or "row"
/// batch. Some systems call this a "row batch" internally and others a "record
/// batch".
pub struct RecordBatch<'a> {
pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for RecordBatch<'a> {
type Inner = RecordBatch<'a>;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self {
_tab: flatbuffers::Table { buf, loc },
}
}
}
impl<'a> RecordBatch<'a> {
#[inline]
pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
RecordBatch { _tab: table }
}
#[allow(unused_mut)]
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
args: &'args RecordBatchArgs<'args>,
) -> flatbuffers::WIPOffset<RecordBatch<'bldr>> {
let mut builder = RecordBatchBuilder::new(_fbb);
builder.add_length(args.length);
if let Some(x) = args.compression {
builder.add_compression(x);
}
if let Some(x) = args.buffers {
builder.add_buffers(x);
}
if let Some(x) = args.nodes {
builder.add_nodes(x);
}
builder.finish()
}
pub const VT_LENGTH: flatbuffers::VOffsetT = 4;
pub const VT_NODES: flatbuffers::VOffsetT = 6;
pub const VT_BUFFERS: flatbuffers::VOffsetT = 8;
pub const VT_COMPRESSION: flatbuffers::VOffsetT = 10;
/// number of records / rows. The arrays in the batch should all have this
/// length
#[inline]
pub fn length(&self) -> i64 {
self._tab
.get::<i64>(RecordBatch::VT_LENGTH, Some(0))
.unwrap()
}
/// Nodes correspond to the pre-ordered flattened logical schema
#[inline]
pub fn nodes(&self) -> Option<&'a [FieldNode]> {
self._tab
.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, FieldNode>>>(
RecordBatch::VT_NODES,
None,
)
.map(|v| v.safe_slice())
}
/// Buffers correspond to the pre-ordered flattened buffer tree
///
/// The number of buffers appended to this list depends on the schema. For
/// example, most primitive arrays will have 2 buffers, 1 for the validity
/// bitmap and 1 for the values. For struct arrays, there will only be a
/// single buffer for the validity (nulls) bitmap
#[inline]
pub fn buffers(&self) -> Option<&'a [Buffer]> {
self._tab
.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, Buffer>>>(
RecordBatch::VT_BUFFERS,
None,
)
.map(|v| v.safe_slice())
}
/// Optional compression of the message body
#[inline]
pub fn compression(&self) -> Option<BodyCompression<'a>> {
self._tab
.get::<flatbuffers::ForwardsUOffset<BodyCompression>>(
RecordBatch::VT_COMPRESSION,
None,
)
}
}
impl flatbuffers::Verifiable for RecordBatch<'_> {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<i64>(&"length", Self::VT_LENGTH, false)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, FieldNode>>>(&"nodes", Self::VT_NODES, false)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, Buffer>>>(&"buffers", Self::VT_BUFFERS, false)?
.visit_field::<flatbuffers::ForwardsUOffset<BodyCompression>>(&"compression", Self::VT_COMPRESSION, false)?
.finish();
Ok(())
}
}
pub struct RecordBatchArgs<'a> {
pub length: i64,
pub nodes: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, FieldNode>>>,
pub buffers: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, Buffer>>>,
pub compression: Option<flatbuffers::WIPOffset<BodyCompression<'a>>>,
}
impl<'a> Default for RecordBatchArgs<'a> {
#[inline]
fn default() -> Self {
RecordBatchArgs {
length: 0,
nodes: None,
buffers: None,
compression: None,
}
}
}
pub struct RecordBatchBuilder<'a: 'b, 'b> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> RecordBatchBuilder<'a, 'b> {
#[inline]
pub fn add_length(&mut self, length: i64) {
self.fbb_
.push_slot::<i64>(RecordBatch::VT_LENGTH, length, 0);
}
#[inline]
pub fn add_nodes(
&mut self,
nodes: flatbuffers::WIPOffset<flatbuffers::Vector<'b, FieldNode>>,
) {
self.fbb_
.push_slot_always::<flatbuffers::WIPOffset<_>>(RecordBatch::VT_NODES, nodes);
}
#[inline]
pub fn add_buffers(
&mut self,
buffers: flatbuffers::WIPOffset<flatbuffers::Vector<'b, Buffer>>,
) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(
RecordBatch::VT_BUFFERS,
buffers,
);
}
#[inline]
pub fn add_compression(
&mut self,
compression: flatbuffers::WIPOffset<BodyCompression<'b>>,
) {
self.fbb_
.push_slot_always::<flatbuffers::WIPOffset<BodyCompression>>(
RecordBatch::VT_COMPRESSION,
compression,
);
}
#[inline]
pub fn new(
_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
) -> RecordBatchBuilder<'a, 'b> {
let start = _fbb.start_table();
RecordBatchBuilder {
fbb_: _fbb,
start_: start,
}
}
#[inline]
pub fn finish(self) -> flatbuffers::WIPOffset<RecordBatch<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}
impl std::fmt::Debug for RecordBatch<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut ds = f.debug_struct("RecordBatch");
ds.field("length", &self.length());
ds.field("nodes", &self.nodes());
ds.field("buffers", &self.buffers());
ds.field("compression", &self.compression());
ds.finish()
}
}
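// Illustrative sketch (not emitted by the FlatBuffers compiler): builds the
// RecordBatch header for a single primitive column of 3 values with no nulls.
// Per the `buffers` doc above, a primitive array contributes two buffers
// (validity bitmap, then values); the `Buffer` struct comes from the Schema
// module glob-imported at the top of this file, and the offsets/lengths shown
// here are hypothetical positions within the message body.
fn example_record_batch<'a>(
    fbb: &mut flatbuffers::FlatBufferBuilder<'a>,
) -> flatbuffers::WIPOffset<RecordBatch<'a>> {
    let nodes = fbb.create_vector(&[FieldNode::new(3, 0)]);
    let buffers = fbb.create_vector(&[
        Buffer::new(0, 0),  // validity bitmap (empty: no nulls)
        Buffer::new(0, 24), // values buffer, e.g. three 8-byte values
    ]);
    RecordBatch::create(
        fbb,
        &RecordBatchArgs {
            length: 3,
            nodes: Some(nodes),
            buffers: Some(buffers),
            compression: None,
        },
    )
}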
pub enum DictionaryBatchOffset {}
#[derive(Copy, Clone, PartialEq)]
/// For sending dictionary encoding information. Any Field can be
/// dictionary-encoded, but in this case none of its children may be
/// dictionary-encoded.
/// There is one vector / column per dictionary, but that vector / column
/// may be spread across multiple dictionary batches by using the isDelta
/// flag
pub struct DictionaryBatch<'a> {
pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for DictionaryBatch<'a> {
type Inner = DictionaryBatch<'a>;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self {
_tab: flatbuffers::Table { buf, loc },
}
}
}
impl<'a> DictionaryBatch<'a> {
#[inline]
pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
DictionaryBatch { _tab: table }
}
#[allow(unused_mut)]
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
args: &'args DictionaryBatchArgs<'args>,
) -> flatbuffers::WIPOffset<DictionaryBatch<'bldr>> {
let mut builder = DictionaryBatchBuilder::new(_fbb);
builder.add_id(args.id);
if let Some(x) = args.data {
builder.add_data(x);
}
builder.add_isDelta(args.isDelta);
builder.finish()
}
pub const VT_ID: flatbuffers::VOffsetT = 4;
pub const VT_DATA: flatbuffers::VOffsetT = 6;
pub const VT_ISDELTA: flatbuffers::VOffsetT = 8;
#[inline]
pub fn id(&self) -> i64 {
self._tab
.get::<i64>(DictionaryBatch::VT_ID, Some(0))
.unwrap()
}
#[inline]
pub fn data(&self) -> Option<RecordBatch<'a>> {
self._tab.get::<flatbuffers::ForwardsUOffset<RecordBatch>>(
DictionaryBatch::VT_DATA,
None,
)
}
/// If isDelta is true the values in the dictionary are to be appended to a
/// dictionary with the indicated id. If isDelta is false this dictionary
/// should replace the existing dictionary.
#[inline]
pub fn isDelta(&self) -> bool {
self._tab
.get::<bool>(DictionaryBatch::VT_ISDELTA, Some(false))
.unwrap()
}
}
impl flatbuffers::Verifiable for DictionaryBatch<'_> {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<i64>(&"id", Self::VT_ID, false)?
.visit_field::<flatbuffers::ForwardsUOffset<RecordBatch>>(
&"data",
Self::VT_DATA,
false,
)?
.visit_field::<bool>(&"isDelta", Self::VT_ISDELTA, false)?
.finish();
Ok(())
}
}
pub struct DictionaryBatchArgs<'a> {
pub id: i64,
pub data: Option<flatbuffers::WIPOffset<RecordBatch<'a>>>,
pub isDelta: bool,
}
impl<'a> Default for DictionaryBatchArgs<'a> {
#[inline]
fn default() -> Self {
DictionaryBatchArgs {
id: 0,
data: None,
isDelta: false,
}
}
}
pub struct DictionaryBatchBuilder<'a: 'b, 'b> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> DictionaryBatchBuilder<'a, 'b> {
#[inline]
pub fn add_id(&mut self, id: i64) {
self.fbb_.push_slot::<i64>(DictionaryBatch::VT_ID, id, 0);
}
#[inline]
pub fn add_data(&mut self, data: flatbuffers::WIPOffset<RecordBatch<'b>>) {
self.fbb_
.push_slot_always::<flatbuffers::WIPOffset<RecordBatch>>(
DictionaryBatch::VT_DATA,
data,
);
}
#[inline]
pub fn add_isDelta(&mut self, isDelta: bool) {
self.fbb_
.push_slot::<bool>(DictionaryBatch::VT_ISDELTA, isDelta, false);
}
#[inline]
pub fn new(
_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
) -> DictionaryBatchBuilder<'a, 'b> {
let start = _fbb.start_table();
DictionaryBatchBuilder {
fbb_: _fbb,
start_: start,
}
}
#[inline]
pub fn finish(self) -> flatbuffers::WIPOffset<DictionaryBatch<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}
impl std::fmt::Debug for DictionaryBatch<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut ds = f.debug_struct("DictionaryBatch");
ds.field("id", &self.id());
ds.field("data", &self.data());
ds.field("isDelta", &self.isDelta());
ds.finish()
}
}
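// Illustrative sketch (not emitted by the FlatBuffers compiler): a delta
// DictionaryBatch that appends new entries to the dictionary with id 1,
// reusing a previously built RecordBatch offset (such as the one from the
// sketch above) as the dictionary data.
fn example_delta_dictionary_batch<'a>(
    fbb: &mut flatbuffers::FlatBufferBuilder<'a>,
    data: flatbuffers::WIPOffset<RecordBatch<'a>>,
) -> flatbuffers::WIPOffset<DictionaryBatch<'a>> {
    DictionaryBatch::create(
        fbb,
        &DictionaryBatchArgs {
            id: 1,
            data: Some(data),
            isDelta: true, // append to, rather than replace, dictionary 1
        },
    )
}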
pub enum MessageOffset {}
#[derive(Copy, Clone, PartialEq)]
pub struct Message<'a> {
pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for Message<'a> {
type Inner = Message<'a>;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self {
_tab: flatbuffers::Table { buf, loc },
}
}
}
impl<'a> Message<'a> {
#[inline]
pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
Message { _tab: table }
}
#[allow(unused_mut)]
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
args: &'args MessageArgs<'args>,
) -> flatbuffers::WIPOffset<Message<'bldr>> {
let mut builder = MessageBuilder::new(_fbb);
builder.add_bodyLength(args.bodyLength);
if let Some(x) = args.custom_metadata {
builder.add_custom_metadata(x);
}
if let Some(x) = args.header {
builder.add_header(x);
}
builder.add_version(args.version);
builder.add_header_type(args.header_type);
builder.finish()
}
pub const VT_VERSION: flatbuffers::VOffsetT = 4;
pub const VT_HEADER_TYPE: flatbuffers::VOffsetT = 6;
pub const VT_HEADER: flatbuffers::VOffsetT = 8;
pub const VT_BODYLENGTH: flatbuffers::VOffsetT = 10;
pub const VT_CUSTOM_METADATA: flatbuffers::VOffsetT = 12;
#[inline]
pub fn version(&self) -> MetadataVersion {
self._tab
.get::<MetadataVersion>(Message::VT_VERSION, Some(MetadataVersion::V1))
.unwrap()
}
#[inline]
pub fn header_type(&self) -> MessageHeader {
self._tab
.get::<MessageHeader>(Message::VT_HEADER_TYPE, Some(MessageHeader::NONE))
.unwrap()
}
#[inline]
pub fn header(&self) -> Option<flatbuffers::Table<'a>> {
self._tab
.get::<flatbuffers::ForwardsUOffset<flatbuffers::Table<'a>>>(
Message::VT_HEADER,
None,
)
}
#[inline]
pub fn bodyLength(&self) -> i64 {
self._tab
.get::<i64>(Message::VT_BODYLENGTH, Some(0))
.unwrap()
}
#[inline]
pub fn custom_metadata(
&self,
) -> Option<flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<KeyValue<'a>>>> {
self._tab.get::<flatbuffers::ForwardsUOffset<
flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<KeyValue>>,
>>(Message::VT_CUSTOM_METADATA, None)
}
#[inline]
#[allow(non_snake_case)]
pub fn header_as_schema(&self) -> Option<Schema<'a>> {
if self.header_type() == MessageHeader::Schema {
self.header().map(Schema::init_from_table)
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn header_as_dictionary_batch(&self) -> Option<DictionaryBatch<'a>> {
if self.header_type() == MessageHeader::DictionaryBatch {
self.header().map(DictionaryBatch::init_from_table)
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn header_as_record_batch(&self) -> Option<RecordBatch<'a>> {
if self.header_type() == MessageHeader::RecordBatch {
self.header().map(RecordBatch::init_from_table)
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn header_as_tensor(&self) -> Option<Tensor<'a>> {
if self.header_type() == MessageHeader::Tensor {
self.header().map(Tensor::init_from_table)
} else {
None
}
}
#[inline]
#[allow(non_snake_case)]
pub fn header_as_sparse_tensor(&self) -> Option<SparseTensor<'a>> {
if self.header_type() == MessageHeader::SparseTensor {
self.header().map(SparseTensor::init_from_table)
} else {
None
}
}
}
impl flatbuffers::Verifiable for Message<'_> {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier,
pos: usize,
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<MetadataVersion>(&"version", Self::VT_VERSION, false)?
.visit_union::<MessageHeader, _>(&"header_type", Self::VT_HEADER_TYPE, &"header", Self::VT_HEADER, false, |key, v, pos| {
match key {
MessageHeader::Schema => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Schema>>("MessageHeader::Schema", pos),
MessageHeader::DictionaryBatch => v.verify_union_variant::<flatbuffers::ForwardsUOffset<DictionaryBatch>>("MessageHeader::DictionaryBatch", pos),
MessageHeader::RecordBatch => v.verify_union_variant::<flatbuffers::ForwardsUOffset<RecordBatch>>("MessageHeader::RecordBatch", pos),
MessageHeader::Tensor => v.verify_union_variant::<flatbuffers::ForwardsUOffset<Tensor>>("MessageHeader::Tensor", pos),
MessageHeader::SparseTensor => v.verify_union_variant::<flatbuffers::ForwardsUOffset<SparseTensor>>("MessageHeader::SparseTensor", pos),
_ => Ok(()),
}
})?
.visit_field::<i64>(&"bodyLength", Self::VT_BODYLENGTH, false)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, flatbuffers::ForwardsUOffset<KeyValue>>>>(&"custom_metadata", Self::VT_CUSTOM_METADATA, false)?
.finish();
Ok(())
}
}
pub struct MessageArgs<'a> {
pub version: MetadataVersion,
pub header_type: MessageHeader,
pub header: Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>>,
pub bodyLength: i64,
pub custom_metadata: Option<
flatbuffers::WIPOffset<
flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<KeyValue<'a>>>,
>,
>,
}
impl<'a> Default for MessageArgs<'a> {
#[inline]
fn default() -> Self {
MessageArgs {
version: MetadataVersion::V1,
header_type: MessageHeader::NONE,
header: None,
bodyLength: 0,
custom_metadata: None,
}
}
}
pub struct MessageBuilder<'a: 'b, 'b> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> MessageBuilder<'a, 'b> {
#[inline]
pub fn add_version(&mut self, version: MetadataVersion) {
self.fbb_.push_slot::<MetadataVersion>(
Message::VT_VERSION,
version,
MetadataVersion::V1,
);
}
#[inline]
pub fn add_header_type(&mut self, header_type: MessageHeader) {
self.fbb_.push_slot::<MessageHeader>(
Message::VT_HEADER_TYPE,
header_type,
MessageHeader::NONE,
);
}
#[inline]
pub fn add_header(
&mut self,
header: flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>,
) {
self.fbb_
.push_slot_always::<flatbuffers::WIPOffset<_>>(Message::VT_HEADER, header);
}
#[inline]
pub fn add_bodyLength(&mut self, bodyLength: i64) {
self.fbb_
.push_slot::<i64>(Message::VT_BODYLENGTH, bodyLength, 0);
}
#[inline]
pub fn add_custom_metadata(
&mut self,
custom_metadata: flatbuffers::WIPOffset<
flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset<KeyValue<'b>>>,
>,
) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(
Message::VT_CUSTOM_METADATA,
custom_metadata,
);
}
#[inline]
pub fn new(
_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
) -> MessageBuilder<'a, 'b> {
let start = _fbb.start_table();
MessageBuilder {
fbb_: _fbb,
start_: start,
}
}
#[inline]
pub fn finish(self) -> flatbuffers::WIPOffset<Message<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}
impl std::fmt::Debug for Message<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut ds = f.debug_struct("Message");
ds.field("version", &self.version());
ds.field("header_type", &self.header_type());
match self.header_type() {
MessageHeader::Schema => {
if let Some(x) = self.header_as_schema() {
ds.field("header", &x)
} else {
ds.field(
"header",
&"InvalidFlatbuffer: Union discriminant does not match value.",
)
}
}
MessageHeader::DictionaryBatch => {
if let Some(x) = self.header_as_dictionary_batch() {
ds.field("header", &x)
} else {
ds.field(
"header",
&"InvalidFlatbuffer: Union discriminant does not match value.",
)
}
}
MessageHeader::RecordBatch => {
if let Some(x) = self.header_as_record_batch() {
ds.field("header", &x)
} else {
ds.field(
"header",
&"InvalidFlatbuffer: Union discriminant does not match value.",
)
}
}
MessageHeader::Tensor => {
if let Some(x) = self.header_as_tensor() {
ds.field("header", &x)
} else {
ds.field(
"header",
&"InvalidFlatbuffer: Union discriminant does not match value.",
)
}
}
MessageHeader::SparseTensor => {
if let Some(x) = self.header_as_sparse_tensor() {
ds.field("header", &x)
} else {
ds.field(
"header",
&"InvalidFlatbuffer: Union discriminant does not match value.",
)
}
}
_ => {
let x: Option<()> = None;
ds.field("header", &x)
}
};
ds.field("bodyLength", &self.bodyLength());
ds.field("custom_metadata", &self.custom_metadata());
ds.finish()
}
}
#[inline]
#[deprecated(since = "2.0.0", note = "Deprecated in favor of `root_as...` methods.")]
pub fn get_root_as_message<'a>(buf: &'a [u8]) -> Message<'a> {
unsafe { flatbuffers::root_unchecked::<Message<'a>>(buf) }
}
#[inline]
#[deprecated(since = "2.0.0", note = "Deprecated in favor of `root_as...` methods.")]
pub fn get_size_prefixed_root_as_message<'a>(buf: &'a [u8]) -> Message<'a> {
unsafe { flatbuffers::size_prefixed_root_unchecked::<Message<'a>>(buf) }
}
#[inline]
/// Verifies that a buffer of bytes contains a `Message`
/// and returns it.
/// Note that verification is still experimental and may not
/// catch every error, or be maximally performant. For the
/// previous, unchecked, behavior use
/// `root_as_message_unchecked`.
pub fn root_as_message(buf: &[u8]) -> Result<Message, flatbuffers::InvalidFlatbuffer> {
flatbuffers::root::<Message>(buf)
}
#[inline]
/// Verifies that a buffer of bytes contains a size prefixed
/// `Message` and returns it.
/// Note that verification is still experimental and may not
/// catch every error, or be maximally performant. For the
/// previous, unchecked, behavior use
/// `size_prefixed_root_as_message_unchecked`.
pub fn size_prefixed_root_as_message(
buf: &[u8],
) -> Result<Message, flatbuffers::InvalidFlatbuffer> {
flatbuffers::size_prefixed_root::<Message>(buf)
}
#[inline]
/// Verifies, with the given options, that a buffer of bytes
/// contains a `Message` and returns it.
/// Note that verification is still experimental and may not
/// catch every error, or be maximally performant. For the
/// previous, unchecked, behavior use
/// `root_as_message_unchecked`.
pub fn root_as_message_with_opts<'b, 'o>(
opts: &'o flatbuffers::VerifierOptions,
buf: &'b [u8],
) -> Result<Message<'b>, flatbuffers::InvalidFlatbuffer> {
flatbuffers::root_with_opts::<Message<'b>>(opts, buf)
}
#[inline]
/// Verifies, with the given verifier options, that a buffer of
/// bytes contains a size prefixed `Message` and returns
/// it. Note that verification is still experimental and may not
/// catch every error, or be maximally performant. For the
/// previous, unchecked, behavior use
/// `root_as_message_unchecked`.
pub fn size_prefixed_root_as_message_with_opts<'b, 'o>(
opts: &'o flatbuffers::VerifierOptions,
buf: &'b [u8],
) -> Result<Message<'b>, flatbuffers::InvalidFlatbuffer> {
flatbuffers::size_prefixed_root_with_opts::<Message<'b>>(opts, buf)
}
#[inline]
/// Assumes, without verification, that a buffer of bytes contains a Message and returns it.
/// # Safety
/// Callers must trust the given bytes do indeed contain a valid `Message`.
pub unsafe fn root_as_message_unchecked(buf: &[u8]) -> Message {
flatbuffers::root_unchecked::<Message>(buf)
}
#[inline]
/// Assumes, without verification, that a buffer of bytes contains a size prefixed Message and returns it.
/// # Safety
/// Callers must trust the given bytes do indeed contain a valid size prefixed `Message`.
pub unsafe fn size_prefixed_root_as_message_unchecked(buf: &[u8]) -> Message {
flatbuffers::size_prefixed_root_unchecked::<Message>(buf)
}
#[inline]
pub fn finish_message_buffer<'a, 'b>(
fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
root: flatbuffers::WIPOffset<Message<'a>>,
) {
fbb.finish(root, None);
}
#[inline]
pub fn finish_size_prefixed_message_buffer<'a, 'b>(
fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
root: flatbuffers::WIPOffset<Message<'a>>,
) {
fbb.finish_size_prefixed(root, None);
}
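// Illustrative sketch (not emitted by the FlatBuffers compiler): an
// end-to-end round trip that wraps a RecordBatch header in a Message,
// finishes the buffer, and reads it back through the checked
// `root_as_message` entry point. `MetadataVersion` comes from the Schema
// module glob-imported above; the message body itself would be written
// separately after this metadata.
fn example_message_round_trip() -> Result<(), flatbuffers::InvalidFlatbuffer> {
    let mut fbb = flatbuffers::FlatBufferBuilder::new();
    let nodes = fbb.create_vector(&[FieldNode::new(3, 0)]);
    let record_batch = RecordBatch::create(
        &mut fbb,
        &RecordBatchArgs {
            length: 3,
            nodes: Some(nodes),
            buffers: None,
            compression: None,
        },
    );
    let message = Message::create(
        &mut fbb,
        &MessageArgs {
            version: MetadataVersion::V5,
            header_type: MessageHeader::RecordBatch,
            header: Some(record_batch.as_union_value()),
            bodyLength: 0,
            custom_metadata: None,
        },
    );
    finish_message_buffer(&mut fbb, message);
    // Verify and read the message back; the union accessor returns None if
    // the header is not actually a RecordBatch.
    let read = root_as_message(fbb.finished_data())?;
    assert_eq!(read.header_type(), MessageHeader::RecordBatch);
    assert_eq!(read.header_as_record_batch().map(|b| b.length()), Some(3));
    Ok(())
}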