Merge branch 'master' into hand-landmarker-fix
commit e8f28d3d00
@@ -723,6 +723,7 @@ cc_library(
         "//mediapipe/framework:calculator_framework",
         "//mediapipe/framework/port:ret_check",
         "//mediapipe/framework/port:status",
+        "@org_tensorflow//tensorflow/core/platform:bfloat16",
     ] + select({
         "//conditions:default": [
             "@org_tensorflow//tensorflow/core:framework",
@@ -1139,6 +1140,7 @@ cc_test(
         "//mediapipe/util:packet_test_util",
         "@org_tensorflow//tensorflow/core:framework",
         "@org_tensorflow//tensorflow/core:protos_all_cc",
+        "@org_tensorflow//tensorflow/core/platform:bfloat16",
     ],
 )

@@ -15,12 +15,16 @@
 // Calculator converts from one-dimensional Tensor of DT_FLOAT to vector<float>
 // OR from (batched) two-dimensional Tensor of DT_FLOAT to vector<vector<float>.

+#include <memory>
+#include <vector>
+
 #include "mediapipe/calculators/tensorflow/tensor_to_vector_float_calculator_options.pb.h"
 #include "mediapipe/framework/calculator_framework.h"
 #include "mediapipe/framework/port/ret_check.h"
 #include "mediapipe/framework/port/status.h"
 #include "tensorflow/core/framework/tensor.h"
 #include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/platform/bfloat16.h"

 namespace mediapipe {

@@ -76,21 +80,31 @@ absl::Status TensorToVectorFloatCalculator::Open(CalculatorContext* cc) {
 absl::Status TensorToVectorFloatCalculator::Process(CalculatorContext* cc) {
   const tf::Tensor& input_tensor =
       cc->Inputs().Index(0).Value().Get<tf::Tensor>();
-  RET_CHECK(tf::DT_FLOAT == input_tensor.dtype())
-      << "expected DT_FLOAT input but got "
+  RET_CHECK(tf::DT_FLOAT == input_tensor.dtype() ||
+            tf::DT_BFLOAT16 == input_tensor.dtype())
+      << "expected DT_FLOAT or DT_BFLOAT_16 input but got "
       << tensorflow::DataTypeString(input_tensor.dtype());

   if (options_.tensor_is_2d()) {
     RET_CHECK(2 == input_tensor.dims())
         << "Expected 2-dimensional Tensor, but the tensor shape is: "
         << input_tensor.shape().DebugString();
-    auto output = absl::make_unique<std::vector<std::vector<float>>>(
+    auto output = std::make_unique<std::vector<std::vector<float>>>(
         input_tensor.dim_size(0), std::vector<float>(input_tensor.dim_size(1)));
     for (int i = 0; i < input_tensor.dim_size(0); ++i) {
       auto& instance_output = output->at(i);
-      const auto& slice = input_tensor.Slice(i, i + 1).unaligned_flat<float>();
-      for (int j = 0; j < input_tensor.dim_size(1); ++j) {
-        instance_output.at(j) = slice(j);
+      if (tf::DT_BFLOAT16 == input_tensor.dtype()) {
+        const auto& slice =
+            input_tensor.Slice(i, i + 1).unaligned_flat<tf::bfloat16>();
+        for (int j = 0; j < input_tensor.dim_size(1); ++j) {
+          instance_output.at(j) = static_cast<float>(slice(j));
+        }
+      } else {
+        const auto& slice =
+            input_tensor.Slice(i, i + 1).unaligned_flat<float>();
+        for (int j = 0; j < input_tensor.dim_size(1); ++j) {
+          instance_output.at(j) = slice(j);
+        }
       }
     }
     cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
@@ -101,10 +115,17 @@ absl::Status TensorToVectorFloatCalculator::Process(CalculatorContext* cc) {
         << "tensor shape is: " << input_tensor.shape().DebugString();
   }
   auto output =
-      absl::make_unique<std::vector<float>>(input_tensor.NumElements());
-  const auto& tensor_values = input_tensor.unaligned_flat<float>();
-  for (int i = 0; i < input_tensor.NumElements(); ++i) {
-    output->at(i) = tensor_values(i);
+      std::make_unique<std::vector<float>>(input_tensor.NumElements());
+  if (tf::DT_BFLOAT16 == input_tensor.dtype()) {
+    const auto& tensor_values = input_tensor.unaligned_flat<tf::bfloat16>();
+    for (int i = 0; i < input_tensor.NumElements(); ++i) {
+      output->at(i) = static_cast<float>(tensor_values(i));
+    }
+  } else {
+    const auto& tensor_values = input_tensor.unaligned_flat<float>();
+    for (int i = 0; i < input_tensor.NumElements(); ++i) {
+      output->at(i) = tensor_values(i);
+    }
   }
   cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
 }
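
Note (reviewer sketch, not part of the diff): the only numeric work in the new branches is an explicit widening cast from bfloat16 to float. Below is a minimal standalone version of the same dtype dispatch, assuming nothing beyond the TensorFlow headers this change already includes; the function name is illustrative.

#include <vector>

#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/bfloat16.h"

namespace tf = ::tensorflow;

// Flattens a DT_FLOAT or DT_BFLOAT16 tensor into a vector<float>, mirroring
// the dtype dispatch added to Process() above.
std::vector<float> FlattenToFloats(const tf::Tensor& tensor) {
  std::vector<float> out(tensor.NumElements());
  if (tensor.dtype() == tf::DT_BFLOAT16) {
    const auto& values = tensor.unaligned_flat<tf::bfloat16>();
    for (int i = 0; i < tensor.NumElements(); ++i) {
      // Widening bfloat16 to float is lossless, so no rounding happens here.
      out[i] = static_cast<float>(values(i));
    }
  } else {
    const auto& values = tensor.unaligned_flat<float>();
    for (int i = 0; i < tensor.NumElements(); ++i) {
      out[i] = values(i);
    }
  }
  return out;
}
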
@@ -12,6 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#include <memory>
+
 #include "mediapipe/calculators/tensorflow/tensor_to_vector_float_calculator_options.pb.h"
 #include "mediapipe/framework/calculator_framework.h"
 #include "mediapipe/framework/calculator_runner.h"
@@ -19,6 +21,7 @@
 #include "mediapipe/util/packet_test_util.h"
 #include "tensorflow/core/framework/tensor.h"
 #include "tensorflow/core/framework/types.pb.h"
+#include "tensorflow/core/platform/bfloat16.h"

 namespace mediapipe {

@@ -72,6 +75,62 @@ TEST_F(TensorToVectorFloatCalculatorTest, ConvertsToVectorFloat) {
   }
 }

+TEST_F(TensorToVectorFloatCalculatorTest, CheckBFloat16Type) {
+  SetUpRunner(false, false);
+  const tf::TensorShape tensor_shape(std::vector<tf::int64>{5});
+  auto tensor = std::make_unique<tf::Tensor>(tf::DT_BFLOAT16, tensor_shape);
+  auto tensor_vec = tensor->vec<tf::bfloat16>();
+  for (int i = 0; i < 5; ++i) {
+    tensor_vec(i) = static_cast<tf::bfloat16>(1 << i);
+  }
+
+  const int64_t time = 1234;
+  runner_->MutableInputs()->Index(0).packets.push_back(
+      Adopt(tensor.release()).At(Timestamp(time)));
+
+  EXPECT_TRUE(runner_->Run().ok());
+  const std::vector<Packet>& output_packets =
+      runner_->Outputs().Index(0).packets;
+  EXPECT_EQ(1, output_packets.size());
+  EXPECT_EQ(time, output_packets[0].Timestamp().Value());
+  const std::vector<float>& output_vector =
+      output_packets[0].Get<std::vector<float>>();
+
+  EXPECT_EQ(5, output_vector.size());
+  for (int i = 0; i < 5; ++i) {
+    const float expected = static_cast<float>(1 << i);
+    EXPECT_EQ(expected, output_vector[i]);
+  }
+}
+
+TEST_F(TensorToVectorFloatCalculatorTest, CheckBFloat16TypeAllDim) {
+  SetUpRunner(false, true);
+  const tf::TensorShape tensor_shape(std::vector<tf::int64>{2, 2, 2});
+  auto tensor = std::make_unique<tf::Tensor>(tf::DT_BFLOAT16, tensor_shape);
+  auto slice = tensor->flat<tf::bfloat16>();
+  for (int i = 0; i < 2 * 2 * 2; ++i) {
+    // 2^i can be represented exactly in floating point numbers if 'i' is small.
+    slice(i) = static_cast<tf::bfloat16>(1 << i);
+  }
+
+  const int64_t time = 1234;
+  runner_->MutableInputs()->Index(0).packets.push_back(
+      Adopt(tensor.release()).At(Timestamp(time)));
+
+  EXPECT_TRUE(runner_->Run().ok());
+  const std::vector<Packet>& output_packets =
+      runner_->Outputs().Index(0).packets;
+  EXPECT_EQ(1, output_packets.size());
+  EXPECT_EQ(time, output_packets[0].Timestamp().Value());
+  const std::vector<float>& output_vector =
+      output_packets[0].Get<std::vector<float>>();
+  EXPECT_EQ(2 * 2 * 2, output_vector.size());
+  for (int i = 0; i < 2 * 2 * 2; ++i) {
+    const float expected = static_cast<float>(1 << i);
+    EXPECT_EQ(expected, output_vector[i]);
+  }
+}
+
 TEST_F(TensorToVectorFloatCalculatorTest, ConvertsBatchedToVectorVectorFloat) {
   SetUpRunner(true, false);
   const tf::TensorShape tensor_shape(std::vector<tf::int64>{1, 5});
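
Aside (not part of the diff): the tests fill tensors with powers of two because bfloat16 keeps float32's 8-bit exponent but only 7 explicit mantissa bits, so small powers of two survive the narrowing cast exactly while arbitrary values generally do not. A quick standalone check of that assumption:

#include <cassert>

#include "tensorflow/core/platform/bfloat16.h"

int main() {
  for (int i = 0; i < 8; ++i) {
    const float value = static_cast<float>(1 << i);
    const tensorflow::bfloat16 narrowed =
        static_cast<tensorflow::bfloat16>(value);
    // Powers of two need only the exponent bits, so the round trip is exact.
    assert(static_cast<float>(narrowed) == value);
  }
  return 0;
}
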
@@ -895,6 +895,7 @@ cc_library(
         "@com_google_absl//absl/log:absl_check",
         "@com_google_absl//absl/log:absl_log",
         "@com_google_absl//absl/memory",
+        "@com_google_absl//absl/status:statusor",
         "@com_google_absl//absl/strings",
         "@com_google_absl//absl/synchronization",
     ],
@@ -996,14 +997,15 @@ cc_library(
     name = "port",
     hdrs = ["port.h"],
     defines = select({
         "//conditions:default": [],
     }) + select({
         "//conditions:default": [],
         "//mediapipe/gpu:disable_gpu": ["MEDIAPIPE_DISABLE_GPU=1"],
-    }) + select({
-        "//conditions:default": [],
-        "//mediapipe/framework/port:disable_opencv": ["MEDIAPIPE_DISABLE_OPENCV=1"],
-    }) + select({
+    }) +
+        select({
+            "//conditions:default": [],
+            "//mediapipe/framework/port:disable_opencv": ["MEDIAPIPE_DISABLE_OPENCV=1"],
+        }) + select({
         "//conditions:default": [],
         # TODO: Improve this. This only sets MEDIAPIPE_DISABLE_OPENCV as a "defines" Make
         # value, not as a bazel "--define" variable, which has effects in C++ code but not in
@@ -20,6 +20,7 @@
 #include <cstddef>
 #include <cstdint>
 #include <memory>
+#include <ostream>
 #include <string>
 #include <type_traits>

@@ -27,6 +28,7 @@
 #include "absl/log/absl_check.h"
 #include "absl/log/absl_log.h"
 #include "absl/memory/memory.h"
+#include "absl/status/statusor.h"
 #include "absl/strings/str_cat.h"
 #include "absl/synchronization/mutex.h"
 #include "mediapipe/framework/deps/no_destructor.h"
@@ -112,7 +114,18 @@ class Packet {
   // Transfers the ownership of holder's data to a unique pointer
   // of the object if the packet is the sole owner of a non-foreign
   // holder. Otherwise, returns error when the packet can't be consumed.
-  // See ConsumeOrCopy for threading requirements and example usage.
+  //
+  // --- WARNING ---
+  // Packet is thread-compatible and this member function is non-const. Hence,
+  // calling it requires exclusive access to the object - callers are
+  // responsible for ensuring that no other thread is doing anything with the
+  // packet.
+  //
+  // For example, if a node/calculator calls this function, then no other
+  // calculator should be processing the same packet. Nodes/calculators cannot
+  // enforce/guarantee this as they don't know of each other, which means graph
+  // must be written in a special way to account for that. It's error-prone and
+  // general recommendation is to avoid calling this function.
   template <typename T>
   absl::StatusOr<std::unique_ptr<T>> Consume();

@@ -120,13 +133,22 @@ class Packet {
   // unique pointer if the packet is the sole owner of a non-foreign
   // holder. Otherwise, the unique pointer holds a copy of the original
   // data. In either case, the original packet is set to empty. The
-  // method returns error when the packet can't be consumed or copied. If
+  // function returns error when the packet can't be consumed or copied. If
   // was_copied is not nullptr, it is set to indicate whether the packet
   // data was copied.
-  // Packet is thread-compatible, therefore Packet::ConsumeOrCopy()
-  // must be thread-compatible: clients who use this function are
-  // responsible for ensuring that no other thread is doing anything
-  // with the Packet.
+  //
+  // --- WARNING ---
+  // Packet is thread-compatible and this member function is non-const. Hence,
+  // calling it requires exclusive access to the object - callers are
+  // responsible for ensuring that no other thread is doing anything with the
+  // packet.
+  //
+  // For example, if a node/calculator calls this function, then no other
+  // calculator should be processing the same packet. Nodes/calculators cannot
+  // enforce/guarantee this as they don't know of each other, which means graph
+  // must be written in a special way to account for that. It's error-prone and
+  // general recommendation is to avoid calling this function.
+  //
   // Example usage:
   // ASSIGN_OR_RETURN(std::unique_ptr<Detection> detection,
   //                  p.ConsumeOrCopy<Detection>());
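
For illustration (reviewer sketch, not part of the change): the ownership rules these comments describe, shown with a locally created packet so that exclusive access is trivially satisfied. The int payload and function name are arbitrary, and the was_copied pointer argument is the one the ConsumeOrCopy comment above refers to.

#include <memory>

#include "absl/status/statusor.h"
#include "mediapipe/framework/packet.h"

void ConsumeSketch() {
  // Sole owner of a non-foreign holder: Consume() transfers ownership and
  // leaves the packet empty. No other thread may touch 'p' during the call.
  mediapipe::Packet p = mediapipe::MakePacket<int>(42);
  absl::StatusOr<std::unique_ptr<int>> owned = p.Consume<int>();
  // Expected: owned.ok(), **owned == 42, and p.IsEmpty() afterwards.

  // Shared ownership: Consume() would fail, but ConsumeOrCopy() falls back
  // to copying the payload and reports that through 'was_copied'.
  mediapipe::Packet a = mediapipe::MakePacket<int>(7);
  mediapipe::Packet b = a;  // two owners of the same holder
  bool was_copied = false;
  absl::StatusOr<std::unique_ptr<int>> copy = b.ConsumeOrCopy<int>(&was_copied);
  // Expected: copy.ok(), **copy == 7, and was_copied == true; 'a' still holds
  // its original data.
}
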
@@ -82,9 +82,11 @@ class ExternalFileHandler {

   // The aligned mapped memory buffer offset, if any.
   int64 buffer_aligned_offset_{};
+#ifndef _WIN32
   // The aligned mapped memory buffer size in bytes taking into account the
   // offset shift introduced by buffer_aligned_memory_offset_, if any.
   int64 buffer_aligned_size_{};
+#endif
 };

 }  // namespace core