// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Unit tests for Tensor and NumericTensor

#include <cstdint>
#include <memory>
#include <string>
#include <vector>

#include <gtest/gtest.h>

#include "arrow/buffer.h"
#include "arrow/tensor.h"
#include "arrow/test-util.h"
#include "arrow/type.h"

namespace arrow {

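// A rank-0 tensor has an empty shape: it is a scalar holding exactly one
// element.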
TEST(TestTensor, ZeroDim) {
  const int64_t values = 1;
  std::vector<int64_t> shape = {};

  using T = int64_t;

  std::shared_ptr<Buffer> buffer;
  ASSERT_OK(AllocateBuffer(values * sizeof(T), &buffer));

  Tensor t0(int64(), buffer, shape);

  ASSERT_EQ(1, t0.size());
}

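// The strides in these tests are NumPy-style byte strides: in a row-major
// (C-order) layout the last dimension is contiguous, and each earlier stride
// is the product of all later extents times the element size. A minimal
// sketch of that arithmetic (RowMajorStrides is a local helper for
// illustration only, not part of the Arrow API):
static std::vector<int64_t> RowMajorStrides(const std::vector<int64_t>& shape,
                                            int64_t elem_size) {
  std::vector<int64_t> strides(shape.size());
  int64_t stride = elem_size;
  for (size_t i = shape.size(); i != 0; --i) {
    strides[i - 1] = stride;
    stride *= shape[i - 1];
  }
  return strides;
}

// For an int64 tensor of shape {4, 6} the helper yields {6 * 8, 8} = {48, 8},
// which is the default stride vector asserted in BasicCtors below.
TEST(TestTensor, RowMajorStridesSketch) {
  std::vector<int64_t> expected = {48, 8};
  ASSERT_EQ(expected, RowMajorStrides({4, 6}, sizeof(int64_t)));
}
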
TEST(TestTensor, BasicCtors) {
  const int64_t values = 24;
  std::vector<int64_t> shape = {4, 6};
  std::vector<int64_t> strides = {48, 8};
  std::vector<std::string> dim_names = {"foo", "bar"};

  using T = int64_t;

  std::shared_ptr<Buffer> buffer;
  ASSERT_OK(AllocateBuffer(values * sizeof(T), &buffer));

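  // Three constructor forms: shape only (strides default to row-major
  // contiguous, here {48, 8}), explicit strides, and explicit strides plus
  // dimension names. dim_name() returns "" when no names were given.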
  Tensor t1(int64(), buffer, shape);
  Tensor t2(int64(), buffer, shape, strides);
  Tensor t3(int64(), buffer, shape, strides, dim_names);

  ASSERT_EQ(24, t1.size());
  ASSERT_TRUE(t1.is_mutable());

  ASSERT_EQ(strides, t1.strides());
  ASSERT_EQ(strides, t2.strides());

  ASSERT_EQ("foo", t3.dim_name(0));
  ASSERT_EQ("bar", t3.dim_name(1));
  ASSERT_EQ("", t1.dim_name(0));
  ASSERT_EQ("", t1.dim_name(1));
}

TEST(TestTensor, IsContiguous) {
  const int64_t values = 24;
  std::vector<int64_t> shape = {4, 6};

  using T = int64_t;

  std::shared_ptr<Buffer> buffer;
  ASSERT_OK(AllocateBuffer(values * sizeof(T), &buffer));

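  // For shape {4, 6} of int64: C-order (row-major) strides are {48, 8} and
  // Fortran-order (column-major) strides are {8, 32}; both layouts cover the
  // buffer without gaps, so both count as contiguous. Strides of {8, 8} make
  // consecutive rows overlap in memory, which is neither layout.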
  std::vector<int64_t> c_strides = {48, 8};
  std::vector<int64_t> f_strides = {8, 32};
  std::vector<int64_t> noncontig_strides = {8, 8};
  Tensor t1(int64(), buffer, shape, c_strides);
  Tensor t2(int64(), buffer, shape, f_strides);
  Tensor t3(int64(), buffer, shape, noncontig_strides);

  ASSERT_TRUE(t1.is_contiguous());
  ASSERT_TRUE(t2.is_contiguous());
  ASSERT_FALSE(t3.is_contiguous());
}

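// Unlike ZeroDim above, this is a one-dimensional tensor whose single
// dimension has length zero: it holds no elements, but one stride entry is
// still computed per dimension.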
TEST(TestTensor, ZeroSizedTensor) {
  std::vector<int64_t> shape = {0};

  std::shared_ptr<Buffer> buffer;
  ASSERT_OK(AllocateBuffer(0, &buffer));

  Tensor t(int64(), buffer, shape);
  ASSERT_EQ(t.strides().size(), 1);
}

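// NumericTensor<T>::Value takes one index per dimension. With no strides
// supplied, the layout defaults to row-major, so {1, 0} addresses the first
// element of the second row.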
TEST(TestNumericTensor, ElementAccessWithRowMajorStrides) {
  std::vector<int64_t> shape = {3, 4};

  std::vector<int64_t> values_i64 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  std::shared_ptr<Buffer> buffer_i64(Buffer::Wrap(values_i64));
  NumericTensor<Int64Type> t_i64(buffer_i64, shape);

  ASSERT_TRUE(t_i64.is_row_major());
  ASSERT_FALSE(t_i64.is_column_major());
  ASSERT_TRUE(t_i64.is_contiguous());
  ASSERT_EQ(1, t_i64.Value({0, 0}));
  ASSERT_EQ(5, t_i64.Value({1, 0}));
  ASSERT_EQ(6, t_i64.Value({1, 1}));
  ASSERT_EQ(11, t_i64.Value({2, 2}));

  std::vector<float> values_f32 = {1.1f, 2.1f, 3.1f, 4.1f,  5.1f,  6.1f,
                                   7.1f, 8.1f, 9.1f, 10.1f, 11.1f, 12.1f};
  std::shared_ptr<Buffer> buffer_f32(Buffer::Wrap(values_f32));
  NumericTensor<FloatType> t_f32(buffer_f32, shape);

  ASSERT_TRUE(t_f32.is_row_major());
  ASSERT_FALSE(t_f32.is_column_major());
  ASSERT_TRUE(t_f32.is_contiguous());
  ASSERT_EQ(1.1f, t_f32.Value({0, 0}));
  ASSERT_EQ(5.1f, t_f32.Value({1, 0}));
  ASSERT_EQ(6.1f, t_f32.Value({1, 1}));
  ASSERT_EQ(11.1f, t_f32.Value({2, 2}));
}

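// Column-major (Fortran-order) layout for shape {3, 4}: the buffer stores the
// matrix column by column, so the strides are {element size, element size *
// 3 rows}, i.e. {8, 24} for int64 and {4, 12} for float32.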
TEST(TestNumericTensor, ElementAccessWithColumnMajorStrides) {
  std::vector<int64_t> shape = {3, 4};

  const int64_t i64_size = sizeof(int64_t);
  std::vector<int64_t> values_i64 = {1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12};
  std::vector<int64_t> strides_i64 = {i64_size, i64_size * 3};
  std::shared_ptr<Buffer> buffer_i64(Buffer::Wrap(values_i64));
  NumericTensor<Int64Type> t_i64(buffer_i64, shape, strides_i64);

  ASSERT_TRUE(t_i64.is_column_major());
  ASSERT_FALSE(t_i64.is_row_major());
  ASSERT_TRUE(t_i64.is_contiguous());
  ASSERT_EQ(1, t_i64.Value({0, 0}));
  ASSERT_EQ(2, t_i64.Value({0, 1}));
  ASSERT_EQ(4, t_i64.Value({0, 3}));
  ASSERT_EQ(5, t_i64.Value({1, 0}));
  ASSERT_EQ(6, t_i64.Value({1, 1}));
  ASSERT_EQ(11, t_i64.Value({2, 2}));

  const int64_t f32_size = sizeof(float);
  std::vector<float> values_f32 = {1.1f, 5.1f, 9.1f,  2.1f, 6.1f, 10.1f,
                                   3.1f, 7.1f, 11.1f, 4.1f, 8.1f, 12.1f};
  std::vector<int64_t> strides_f32 = {f32_size, f32_size * 3};
  std::shared_ptr<Buffer> buffer_f32(Buffer::Wrap(values_f32));
  NumericTensor<FloatType> t_f32(buffer_f32, shape, strides_f32);

  ASSERT_TRUE(t_f32.is_column_major());
  ASSERT_FALSE(t_f32.is_row_major());
  ASSERT_TRUE(t_f32.is_contiguous());
  ASSERT_EQ(1.1f, t_f32.Value({0, 0}));
  ASSERT_EQ(2.1f, t_f32.Value({0, 1}));
  ASSERT_EQ(4.1f, t_f32.Value({0, 3}));
  ASSERT_EQ(5.1f, t_f32.Value({1, 0}));
  ASSERT_EQ(6.1f, t_f32.Value({1, 1}));
  ASSERT_EQ(11.1f, t_f32.Value({2, 2}));
}

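// Here each logical row of four values is followed by two padding elements in
// the buffer, so the row stride is 6 * element size while the column stride
// stays at the element size. Such a padded layout is neither row- nor
// column-major, and is not contiguous.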
TEST(TestNumericTensor, ElementAccessWithNonContiguousStrides) {
  std::vector<int64_t> shape = {3, 4};

  const int64_t i64_size = sizeof(int64_t);
  std::vector<int64_t> values_i64 = {1, 2, 3, 4, 0,  0,  5,  6, 7,
                                     8, 0, 0, 9, 10, 11, 12, 0, 0};
  std::vector<int64_t> strides_i64 = {i64_size * 6, i64_size};
  std::shared_ptr<Buffer> buffer_i64(Buffer::Wrap(values_i64));
  NumericTensor<Int64Type> t_i64(buffer_i64, shape, strides_i64);

  ASSERT_FALSE(t_i64.is_contiguous());
  ASSERT_FALSE(t_i64.is_row_major());
  ASSERT_FALSE(t_i64.is_column_major());
  ASSERT_EQ(1, t_i64.Value({0, 0}));
  ASSERT_EQ(2, t_i64.Value({0, 1}));
  ASSERT_EQ(4, t_i64.Value({0, 3}));
  ASSERT_EQ(5, t_i64.Value({1, 0}));
  ASSERT_EQ(6, t_i64.Value({1, 1}));
  ASSERT_EQ(11, t_i64.Value({2, 2}));

  const int64_t f32_size = sizeof(float);
  std::vector<float> values_f32 = {1.1f, 2.1f,  3.1f,  4.1f,  0.0f, 0.0f,
                                   5.1f, 6.1f,  7.1f,  8.1f,  0.0f, 0.0f,
                                   9.1f, 10.1f, 11.1f, 12.1f, 0.0f, 0.0f};
  std::vector<int64_t> strides_f32 = {f32_size * 6, f32_size};
  std::shared_ptr<Buffer> buffer_f32(Buffer::Wrap(values_f32));
  NumericTensor<FloatType> t_f32(buffer_f32, shape, strides_f32);

  ASSERT_FALSE(t_f32.is_contiguous());
  ASSERT_FALSE(t_f32.is_row_major());
  ASSERT_FALSE(t_f32.is_column_major());
  ASSERT_EQ(1.1f, t_f32.Value({0, 0}));
  ASSERT_EQ(2.1f, t_f32.Value({0, 1}));
  ASSERT_EQ(4.1f, t_f32.Value({0, 3}));
  ASSERT_EQ(5.1f, t_f32.Value({1, 0}));
  ASSERT_EQ(6.1f, t_f32.Value({1, 1}));
  ASSERT_EQ(11.1f, t_f32.Value({2, 2}));
}

}  // namespace arrow