From c6b3090d0ee559e5036e061a7cbb1d28d564d513 Mon Sep 17 00:00:00 2001
From: MediaPipe Team
Date: Thu, 6 Apr 2023 23:28:59 -0700
Subject: [PATCH] Internal change

PiperOrigin-RevId: 522534050
---
 mediapipe/util/tracking/box_detector.cc       |  8 +--
 mediapipe/util/tracking/box_tracker.cc        | 26 ++++----
 mediapipe/util/tracking/flow_packager.cc      | 66 +++++++++----------
 mediapipe/util/tracking/image_util.cc         |  4 +-
 mediapipe/util/tracking/motion_analysis.cc    | 12 ++--
 mediapipe/util/tracking/motion_estimation.cc  | 12 ++--
 .../util/tracking/region_flow_computation.cc  | 34 +++++-----
 mediapipe/util/tracking/tone_estimation.cc    |  4 +-
 mediapipe/util/tracking/tone_models.cc        |  4 +-
 .../tracking/tracked_detection_manager.cc     |  6 +-
 mediapipe/util/tracking/tracking.cc           |  6 +-
 11 files changed, 91 insertions(+), 91 deletions(-)

diff --git a/mediapipe/util/tracking/box_detector.cc b/mediapipe/util/tracking/box_detector.cc
index e3a0eb476..58d855537 100644
--- a/mediapipe/util/tracking/box_detector.cc
+++ b/mediapipe/util/tracking/box_detector.cc
@@ -106,8 +106,8 @@ BoxDetectorInterface::BoxDetectorInterface(const BoxDetectorOptions &options)
 
 void BoxDetectorInterface::DetectAndAddBoxFromFeatures(
     const std::vector<Vector2_f> &features, const cv::Mat &descriptors,
-    const TimedBoxProtoList &tracked_boxes, int64 timestamp_msec, float scale_x,
-    float scale_y, TimedBoxProtoList *detected_boxes) {
+    const TimedBoxProtoList &tracked_boxes, int64_t timestamp_msec,
+    float scale_x, float scale_y, TimedBoxProtoList *detected_boxes) {
   absl::MutexLock lock_access(&access_to_index_);
   image_scale_ = std::min(scale_x, scale_y);
   image_aspect_ = scale_x / scale_y;
@@ -177,7 +177,7 @@ void BoxDetectorInterface::DetectAndAddBoxFromFeatures(
 
 void BoxDetectorInterface::DetectAndAddBox(
     const TrackingData &tracking_data, const TimedBoxProtoList &tracked_boxes,
-    int64 timestamp_msec, TimedBoxProtoList *detected_boxes) {
+    int64_t timestamp_msec, TimedBoxProtoList *detected_boxes) {
   std::vector<Vector2_f> features_from_tracking_data;
   std::vector<std::string> descriptors_from_tracking_data;
   FeatureAndDescriptorFromTrackingData(tracking_data,
@@ -255,7 +255,7 @@ bool BoxDetectorInterface::CheckDetectAndAddBox(
 
 void BoxDetectorInterface::DetectAndAddBox(
     const cv::Mat &image, const TimedBoxProtoList &tracked_boxes,
-    int64 timestamp_msec, TimedBoxProtoList *detected_boxes) {
+    int64_t timestamp_msec, TimedBoxProtoList *detected_boxes) {
   // Determine if we need execute feature extraction.
   if (!CheckDetectAndAddBox(tracked_boxes)) {
     return;
diff --git a/mediapipe/util/tracking/box_tracker.cc b/mediapipe/util/tracking/box_tracker.cc
index d5c721c66..2d1af779e 100644
--- a/mediapipe/util/tracking/box_tracker.cc
+++ b/mediapipe/util/tracking/box_tracker.cc
@@ -112,7 +112,7 @@ void TimedBoxFromMotionBoxState(const MotionBoxState& state, TimedBox* box) {
 namespace {
 
 TimedBox BlendTimedBoxes(const TimedBox& lhs, const TimedBox& rhs,
-                         int64 time_msec) {
+                         int64_t time_msec) {
   CHECK_LT(lhs.time_msec, rhs.time_msec);
   const double alpha =
       (time_msec - lhs.time_msec) * 1.0 / (rhs.time_msec - lhs.time_msec);
@@ -246,7 +246,7 @@ BoxTracker::BoxTracker(
 void BoxTracker::AddTrackingDataChunk(const TrackingDataChunk* chunk,
                                       bool copy_data) {
   CHECK_GT(chunk->item_size(), 0) << "Empty chunk.";
-  int64 chunk_time_msec = chunk->item(0).timestamp_usec() / 1000;
+  int64_t chunk_time_msec = chunk->item(0).timestamp_usec() / 1000;
   int chunk_idx = ChunkIdxFromTime(chunk_time_msec);
   CHECK_GE(chunk_idx, tracking_data_.size()) << "Chunk is out of order.";
   if (chunk_idx > tracking_data_.size()) {
@@ -270,7 +270,7 @@ void BoxTracker::AddTrackingDataChunks(
 }
 
 void BoxTracker::NewBoxTrack(const TimedBox& initial_pos, int id,
-                             int64 min_msec, int64 max_msec) {
+                             int64_t min_msec, int64_t max_msec) {
   VLOG(1) << "New box track: " << id << " : " << initial_pos.ToString()
           << " from " << min_msec << " to " << max_msec;
 
@@ -290,7 +290,7 @@ void BoxTracker::NewBoxTrack(const TimedBox& initial_pos, int id,
   tracking_workers_->Schedule(operation);
 }
 
-std::pair<int64, int64> BoxTracker::TrackInterval(int id) {
+std::pair<int64_t, int64_t> BoxTracker::TrackInterval(int id) {
   absl::MutexLock lock(&path_mutex_);
   const Path& path = paths_[id];
   if (path.empty()) {
@@ -305,7 +305,7 @@ std::pair<int64, int64> BoxTracker::TrackInterval(int id) {
 }
 
 void BoxTracker::NewBoxTrackAsync(const TimedBox& initial_pos, int id,
-                                  int64 min_msec, int64 max_msec) {
+                                  int64_t min_msec, int64_t max_msec) {
   VLOG(1) << "Async track for id: " << id << " from " << min_msec << " to "
           << max_msec;
 
@@ -483,7 +483,7 @@ void BoxTracker::CancelTracking(int id, int checkpoint) {
   track_status_[id][checkpoint].canceled = false;
 }
 
-bool BoxTracker::GetTimedPosition(int id, int64 time_msec, TimedBox* result,
+bool BoxTracker::GetTimedPosition(int id, int64_t time_msec, TimedBox* result,
                                   std::vector<MotionBoxState>* states) {
   CHECK(result);
 
@@ -686,7 +686,7 @@ bool BoxTracker::WaitForChunkFile(int id, int checkpoint,
   return file_exists;
 }
 
-int BoxTracker::ClosestFrameIndex(int64 msec,
+int BoxTracker::ClosestFrameIndex(int64_t msec,
                                   const TrackingDataChunk& chunk) const {
   CHECK_GT(chunk.item_size(), 0);
   typedef TrackingDataChunk::Item Item;
@@ -708,8 +708,8 @@ int BoxTracker::ClosestFrameIndex(int64 msec,
   }
 
   // Determine closest timestamp.
-  const int64 lhs_diff = msec - chunk.item(pos - 1).timestamp_usec() / 1000;
-  const int64 rhs_diff = chunk.item(pos).timestamp_usec() / 1000 - msec;
+  const int64_t lhs_diff = msec - chunk.item(pos - 1).timestamp_usec() / 1000;
+  const int64_t rhs_diff = chunk.item(pos).timestamp_usec() / 1000 - msec;
 
   if (std::min(lhs_diff, rhs_diff) >= 67) {
     LOG(ERROR) << "No frame found within 67ms, probably using wrong chunk.";
@@ -849,7 +849,7 @@ void BoxTracker::TrackingImpl(const TrackingImplArgs& a) {
     MotionVectorFrame mvf;
     MotionVectorFrameFromTrackingData(a.chunk_data->item(f).tracking_data(),
                                       &mvf);
-    const int64 track_duration_ms =
+    const int64_t track_duration_ms =
         TrackingDataDurationMs(a.chunk_data->item(f));
     if (track_duration_ms > 0) {
       mvf.duration_ms = track_duration_ms;
@@ -905,8 +905,8 @@ void BoxTracker::TrackingImpl(const TrackingImplArgs& a) {
   cleanup_func();
 }
 
-bool TimedBoxAtTime(const PathSegment& segment, int64 time_msec, TimedBox* box,
-                    MotionBoxState* state) {
+bool TimedBoxAtTime(const PathSegment& segment, int64_t time_msec,
+                    TimedBox* box, MotionBoxState* state) {
   CHECK(box);
 
   if (segment.empty()) {
@@ -1028,7 +1028,7 @@ bool BoxTracker::WaitForAllOngoingTracks(int timeout_us) {
   return !IsTrackingOngoingMutexHeld();
 }
 
-bool BoxTracker::GetTrackingData(int id, int64 request_time_msec,
+bool BoxTracker::GetTrackingData(int id, int64_t request_time_msec,
                                  TrackingData* tracking_data,
                                  int* tracking_data_msec) {
   CHECK(tracking_data);
diff --git a/mediapipe/util/tracking/flow_packager.cc b/mediapipe/util/tracking/flow_packager.cc
index a12340949..dceacbcd9 100644
--- a/mediapipe/util/tracking/flow_packager.cc
+++ b/mediapipe/util/tracking/flow_packager.cc
@@ -276,7 +276,7 @@ void FlowPackager::EncodeTrackingData(const TrackingData& tracking_data,
   CHECK(options_.binary_tracking_data_support());
   CHECK(binary_data != nullptr);
 
-  int32 frame_flags = 0;
+  int32_t frame_flags = 0;
   const bool high_profile = options_.use_high_profile();
   if (high_profile) {
     frame_flags |= TrackingData::FLAG_PROFILE_HIGH;
@@ -293,7 +293,7 @@ void FlowPackager::EncodeTrackingData(const TrackingData& tracking_data,
       tracking_data.frame_flags() & TrackingData::FLAG_BACKGROUND_UNSTABLE;
 
   const TrackingData::MotionData& motion_data = tracking_data.motion_data();
-  int32 num_vectors = motion_data.num_elements();
+  int32_t num_vectors = motion_data.num_elements();
 
   // Compute maximum vector or delta vector value.
   float max_vector_value = 0;
@@ -311,8 +311,8 @@ void FlowPackager::EncodeTrackingData(const TrackingData& tracking_data,
     }
   }
 
-  const int32 domain_width = tracking_data.domain_width();
-  const int32 domain_height = tracking_data.domain_height();
+  const int32_t domain_width = tracking_data.domain_width();
+  const int32_t domain_height = tracking_data.domain_height();
   CHECK_LT(domain_height, 256) << "Only heights below 256 are supported.";
 
   const float frame_aspect = tracking_data.frame_aspect();
@@ -338,20 +338,20 @@ void FlowPackager::EncodeTrackingData(const TrackingData& tracking_data,
   int scale_16 = std::ceil(kByteMax16 / max_vector_value);
   int scale_8 = std::ceil(kByteMax8 / max_vector_value);
 
-  const int32 scale =
+  const int32_t scale =
       options_.high_fidelity_16bit_encode() ? scale_16 : scale_8;
   const float inv_scale = 1.0f / scale;
   const int kByteMax =
       options_.high_fidelity_16bit_encode() ? kByteMax16 : kByteMax8;
 
   // Compressed flow to be encoded in binary format.
-  std::vector<int16> flow_compressed_16;
-  std::vector<int8> flow_compressed_8;
+  std::vector<int16_t> flow_compressed_16;
+  std::vector<int8_t> flow_compressed_8;
   flow_compressed_16.reserve(num_vectors);
   flow_compressed_8.reserve(num_vectors);
 
-  std::vector<uint8> row_idx;
+  std::vector<uint8_t> row_idx;
   row_idx.reserve(num_vectors);
 
   float average_error = 0;
@@ -538,7 +538,7 @@ void FlowPackager::EncodeTrackingData(const TrackingData& tracking_data,
   }
 
   // Delta compress col_starts.
-  std::vector<uint8> col_start_delta(domain_width + 1, 0);
+  std::vector<uint8_t> col_start_delta(domain_width + 1, 0);
   col_start_delta[0] = col_starts[0];
   for (int k = 1; k < domain_width + 1; ++k) {
     const int delta = col_starts[k] - col_starts[k - 1];
@@ -575,10 +575,10 @@ void FlowPackager::EncodeTrackingData(const TrackingData& tracking_data,
   std::string* data = binary_data->mutable_data();
   data->clear();
 
-  int32 vector_size = options_.high_fidelity_16bit_encode()
-                          ? flow_compressed_16.size()
-                          : flow_compressed_8.size();
-  int32 row_idx_size = row_idx.size();
+  int32_t vector_size = options_.high_fidelity_16bit_encode()
+                            ? flow_compressed_16.size()
+                            : flow_compressed_8.size();
+  int32_t row_idx_size = row_idx.size();
 
   absl::StrAppend(data, EncodeToString(frame_flags),
                   EncodeToString(domain_width), EncodeToString(domain_height),
@@ -605,12 +605,12 @@ void FlowPackager::DecodeTrackingData(const BinaryTrackingData& container_data,
   CHECK(tracking_data != nullptr);
   absl::string_view data(container_data.data());
 
-  int32 frame_flags = 0;
-  int32 domain_width = 0;
-  int32 domain_height = 0;
+  int32_t frame_flags = 0;
+  int32_t domain_width = 0;
+  int32_t domain_height = 0;
   std::vector<float> background_model;
-  int32 scale = 0;
-  int32 num_vectors = 0;
+  int32_t scale = 0;
+  int32_t num_vectors = 0;
   float frame_aspect = 0.0f;
 
   DecodeFromStringView(PopSubstring(4, &data), &frame_flags);
@@ -642,7 +642,7 @@ void FlowPackager::DecodeTrackingData(const BinaryTrackingData& container_data,
       frame_flags & TrackingData::FLAG_HIGH_FIDELITY_VECTORS;
   const float flow_denom = 1.0f / scale;
 
-  std::vector<uint8> col_starts_delta;
+  std::vector<uint8_t> col_starts_delta;
   DecodeVectorFromStringView(PopSubstring(domain_width + 1, &data),
                              &col_starts_delta);
 
@@ -656,8 +656,8 @@ void FlowPackager::DecodeTrackingData(const BinaryTrackingData& container_data,
     col_starts.push_back(column);
   }
 
-  std::vector<uint8> row_idx;
-  int32 row_idx_size;
+  std::vector<uint8_t> row_idx;
+  int32_t row_idx_size;
   DecodeFromStringView(PopSubstring(4, &data), &row_idx_size);
 
   // Should not have more row indices than vectors. (One for each in baseline
@@ -676,14 +676,14 @@ void FlowPackager::DecodeTrackingData(const BinaryTrackingData& container_data,
   const int kIndexMask = FlowPackagerOptions::INDEX_MASK;
 
   std::vector<int> column_expansions(domain_width, 0);
-  std::vector<uint8> row_idx_unpacked;
+  std::vector<uint8_t> row_idx_unpacked;
   row_idx_unpacked.reserve(num_vectors);
   advance.clear();
 
   for (int c = 0; c < col_starts.size() - 1; ++c) {
     const int r_start = col_starts[c];
     const int r_end = col_starts[c + 1];
-    uint8 prev_row_idx = 0;
+    uint8_t prev_row_idx = 0;
     for (int r = r_start; r < r_end; ++r) {
       // Use top bit as indicator to advance.
       advance.push_back(row_idx[r] & kAdvanceFlag);
@@ -725,7 +725,7 @@ void FlowPackager::DecodeTrackingData(const BinaryTrackingData& container_data,
   int prev_flow_x = 0;
   int prev_flow_y = 0;
   if (high_fidelity) {
-    std::vector<int16> vector_data;
+    std::vector<int16_t> vector_data;
     DecodeVectorFromStringView(
         PopSubstring(sizeof(vector_data[0]) * vector_data_size, &data),
         &vector_data);
@@ -751,7 +751,7 @@ void FlowPackager::DecodeTrackingData(const BinaryTrackingData& container_data,
     }
     CHECK_EQ(vector_data_size, counter);
   } else {
-    std::vector<int8> vector_data;
+    std::vector<int8_t> vector_data;
     DecodeVectorFromStringView(
         PopSubstring(sizeof(vector_data[0]) * vector_data_size, &data),
         &vector_data);
@@ -813,13 +813,13 @@ void FlowPackager::DecodeMetaData(const TrackingContainer& container_data,
 
   absl::string_view data(container_data.data());
 
-  int32 num_frames;
+  int32_t num_frames;
   DecodeFromStringView(PopSubstring(4, &data), &num_frames);
   meta_data->set_num_frames(num_frames);
 
   for (int k = 0; k < num_frames; ++k) {
-    int32 msec;
-    int32 stream_offset;
+    int32_t msec;
+    int32_t stream_offset;
     DecodeFromStringView(PopSubstring(4, &data), &msec);
     DecodeFromStringView(PopSubstring(4, &data), &stream_offset);
 
@@ -831,14 +831,14 @@
 }
 
 void FlowPackager::FinalizeTrackingContainerFormat(
-    std::vector<int64>* timestamps,
+    std::vector<int64_t>* timestamps,
     TrackingContainerFormat* container_format) {
   CHECK(container_format != nullptr);
 
   // Compute binary sizes of track_data.
   const int num_frames = container_format->track_data_size();
 
-  std::vector<int64> msecs(num_frames, 0);
+  std::vector<int64_t> msecs(num_frames, 0);
   if (timestamps) {
     CHECK_EQ(num_frames, timestamps->size());
     msecs = *timestamps;
@@ -876,13 +876,13 @@
 }
 
 void FlowPackager::FinalizeTrackingContainerProto(
-    std::vector<int64>* timestamps, TrackingContainerProto* proto) {
+    std::vector<int64_t>* timestamps, TrackingContainerProto* proto) {
   CHECK(proto != nullptr);
 
   // Compute binary sizes of track_data.
   const int num_frames = proto->track_data_size();
-  std::vector<int64> msecs(num_frames, 0);
+  std::vector<int64_t> msecs(num_frames, 0);
   if (timestamps) {
     CHECK_EQ(num_frames, timestamps->size());
     msecs = *timestamps;
@@ -905,7 +905,7 @@ void FlowPackager::FinalizeTrackingContainerProto(
 }
 
 void FlowPackager::InitializeMetaData(int num_frames,
-                                      const std::vector<int64>& msecs,
+                                      const std::vector<int64_t>& msecs,
                                       const std::vector<int>& data_sizes,
                                       MetaData* meta_data) const {
   meta_data->set_num_frames(num_frames);
diff --git a/mediapipe/util/tracking/image_util.cc b/mediapipe/util/tracking/image_util.cc
index 90ac1ac7b..a44c00b01 100644
--- a/mediapipe/util/tracking/image_util.cc
+++ b/mediapipe/util/tracking/image_util.cc
@@ -32,8 +32,8 @@ float FrameDifferenceMedian(const cv::Mat& img_1, const cv::Mat& img_2) {
   color_diffs.reserve(img_1.cols * img_1.rows);
   const int channels = img_1.channels();
   for (int j = 0; j < img_1.rows; ++j) {
-    const uint8* src_1 = img_1.ptr(j);
-    const uint8* src_2 = img_2.ptr(j);
+    const uint8_t* src_1 = img_1.ptr(j);
+    const uint8_t* src_2 = img_2.ptr(j);
     const int end_i = channels * img_1.cols;
     const float inverse = 1.0f / channels;
     for (int i = 0; i < end_i;) {
diff --git a/mediapipe/util/tracking/motion_analysis.cc b/mediapipe/util/tracking/motion_analysis.cc
index 5b6a970cf..67973cbcf 100644
--- a/mediapipe/util/tracking/motion_analysis.cc
+++ b/mediapipe/util/tracking/motion_analysis.cc
@@ -338,13 +338,13 @@ void MotionAnalysis::InitPolicyOptions() {
   }
 }
 
-bool MotionAnalysis::AddFrame(const cv::Mat& frame, int64 timestamp_usec,
+bool MotionAnalysis::AddFrame(const cv::Mat& frame, int64_t timestamp_usec,
                               RegionFlowFeatureList* feature_list) {
   return AddFrameWithSeed(frame, timestamp_usec, Homography(), feature_list);
 }
 
 bool MotionAnalysis::AddFrameWithSeed(const cv::Mat& frame,
-                                      int64 timestamp_usec,
+                                      int64_t timestamp_usec,
                                       const Homography& initial_transform,
                                       RegionFlowFeatureList* feature_list) {
   return AddFrameGeneric(frame, timestamp_usec, initial_transform,
@@ -355,7 +355,7 @@ bool MotionAnalysis::AddFrameWithSeed(const cv::Mat& frame,
 }
 
 bool MotionAnalysis::AddFrameGeneric(
-    const cv::Mat& frame, int64 timestamp_usec,
+    const cv::Mat& frame, int64_t timestamp_usec,
     const Homography& initial_transform, const Homography* rejection_transform,
     const RegionFlowFeatureList* external_features,
     std::function<void(RegionFlowFeatureList*)>* modify_features,
@@ -730,7 +730,7 @@ void MotionAnalysis::ComputeDenseForeground(
   foreground_mask->create(frame_height_, frame_width_, CV_8U);
   for (int i = 0; i < frame_height_; ++i) {
     const float* src_ptr = foreground_map.ptr<float>(i);
-    uint8* dst_ptr = foreground_mask->ptr(i);
+    uint8_t* dst_ptr = foreground_mask->ptr(i);
     for (int j = 0; j < frame_width_; ++j) {
       // Result is in first channel (second is confidence).
       dst_ptr[j] =
@@ -758,8 +758,8 @@ void MotionAnalysis::VisualizeDenseForeground(const cv::Mat& foreground_mask,
 
   // Burn-in alpha compositing.
   const float alpha = 1.3f;
   for (int i = 0; i < frame_height_; ++i) {
-    uint8* image_ptr = output->ptr(i);
-    const uint8* foreground_ptr = foreground_mask.ptr(i);
+    uint8_t* image_ptr = output->ptr(i);
+    const uint8_t* foreground_ptr = foreground_mask.ptr(i);
     for (int j = 0; j < frame_width_; ++j) {
       const float norm_foreground = foreground_ptr[j] * (1.0 / 255.0f);
diff --git a/mediapipe/util/tracking/motion_estimation.cc b/mediapipe/util/tracking/motion_estimation.cc
index e06acf1d1..b608b4705 100644
--- a/mediapipe/util/tracking/motion_estimation.cc
+++ b/mediapipe/util/tracking/motion_estimation.cc
@@ -3056,8 +3056,8 @@ bool MotionEstimation::GetTranslationIrlsInitialization(
 
   // Bool indicator which features agree with model in each round.
   // In case no RANSAC rounds are performed considered all features inliers.
-  std::vector<uint8> best_features(num_features, 1);
-  std::vector<uint8> curr_features(num_features);
+  std::vector<uint8_t> best_features(num_features, 1);
+  std::vector<uint8_t> curr_features(num_features);
   float best_sum = 0;
 
   unsigned int seed = 900913;  // = Google in leet :)
@@ -3095,7 +3095,7 @@ bool MotionEstimation::GetTranslationIrlsInitialization(
     for (int i = 0; i < num_features; ++i) {
       const Feature& feature = feature_list->feature(i);
      const Vector2_f diff = FeatureFlow(feature) - flow;
-      curr_features[i] = static_cast<uint8>(diff.Norm2() < sq_cutoff);
+      curr_features[i] = static_cast<uint8_t>(diff.Norm2() < sq_cutoff);
       if (curr_features[i]) {
         float score = feature.irls_weight();
         if (inlier_mask) {
@@ -3366,8 +3366,8 @@ bool MotionEstimation::GetSimilarityIrlsInitialization(
 
   // Bool indicator which features agree with model in each round.
   // In case no RANSAC rounds are performed considered all features inliers.
-  std::vector<uint8> best_features(num_features, 1);
-  std::vector<uint8> curr_features(num_features);
+  std::vector<uint8_t> best_features(num_features, 1);
+  std::vector<uint8_t> curr_features(num_features);
   float best_sum = 0;
 
   unsigned int seed = 900913;  // = Google in leet :)
@@ -3417,7 +3417,7 @@ bool MotionEstimation::GetSimilarityIrlsInitialization(
       const Vector2_f trans_location = LinearSimilarityAdapter::TransformPoint(
           similarity, FeatureLocation(feature));
       const Vector2_f diff = FeatureMatchLocation(feature) - trans_location;
-      curr_features[i] = static_cast<uint8>(diff.Norm2() < sq_cutoff);
+      curr_features[i] = static_cast<uint8_t>(diff.Norm2() < sq_cutoff);
       if (curr_features[i]) {
         float score = feature.irls_weight();
         if (inlier_mask) {
diff --git a/mediapipe/util/tracking/region_flow_computation.cc b/mediapipe/util/tracking/region_flow_computation.cc
index 708c868b5..b6704cc61 100644
--- a/mediapipe/util/tracking/region_flow_computation.cc
+++ b/mediapipe/util/tracking/region_flow_computation.cc
@@ -149,7 +149,7 @@ void GetPatchDescriptorAtPoint(const cv::Mat& rgb_frame, const Vector2_i& pt,
   // Compute channel sums and means.
   int sum[3] = {0, 0, 0};
   for (int y = 0; y < diameter; ++y) {
-    const uint8* data = rgb_window.ptr(y);
+    const uint8_t* data = rgb_window.ptr(y);
     for (int x = 0; x < diameter; ++x, data += 3) {
       for (int c = 0; c < 3; ++c) {
         sum[c] += data[c];
@@ -175,7 +175,7 @@ void GetPatchDescriptorAtPoint(const cv::Mat& rgb_frame, const Vector2_i& pt,
       // using N = diameter * diameter and sum[c] = N * mean[c].
       product[c][d] = -sum[c] * sum[d] * denom;
       for (int y = 0; y < diameter; ++y) {
-        const uint8* data = rgb_window.ptr(y);
+        const uint8_t* data = rgb_window.ptr(y);
         for (int x = 0; x < diameter; ++x, data += 3) {
           product[c][d] += static_cast<int>(data[c]) * data[d];
         }
@@ -355,7 +355,7 @@ struct RegionFlowComputation::FrameTrackingData {
   int frame_num = 0;
 
   // Timestamp of the underlying frame.
-  int64 timestamp_usec = 0;
+  int64_t timestamp_usec = 0;
 
   // Difference of this FrameTrackingData's tiny_image w.r.t. previous one,
   // i.e. one frame earlier.
@@ -407,7 +407,7 @@ struct RegionFlowComputation::FrameTrackingData {
     }
   }
 
-  void Reset(int frame_num_, int64 timestamp_) {
+  void Reset(int frame_num_, int64_t timestamp_) {
     frame_num = frame_num_;
     timestamp_usec = timestamp_;
     pyramid_levels = 0;
@@ -834,19 +834,19 @@ RegionFlowComputation::RegionFlowComputation(
 RegionFlowComputation::~RegionFlowComputation() {}
 
 bool RegionFlowComputation::AddImage(const cv::Mat& source,
-                                     int64 timestamp_usec) {
+                                     int64_t timestamp_usec) {
   return AddImageAndTrack(source, cv::Mat(), timestamp_usec, Homography());
 }
 
 bool RegionFlowComputation::AddImageWithSeed(
-    const cv::Mat& source, int64 timestamp_usec,
+    const cv::Mat& source, int64_t timestamp_usec,
     const Homography& initial_transform) {
   return AddImageAndTrack(source, cv::Mat(), timestamp_usec,
                           initial_transform);
 }
 
 bool RegionFlowComputation::AddImageWithMask(const cv::Mat& source,
                                              const cv::Mat& source_mask,
-                                             int64 timestamp_usec) {
+                                             int64_t timestamp_usec) {
   return AddImageAndTrack(source, source_mask, timestamp_usec, Homography());
 }
 
@@ -1035,7 +1035,7 @@ bool RegionFlowComputation::InitFrame(const cv::Mat& source,
 }
 
 bool RegionFlowComputation::AddImageAndTrack(
-    const cv::Mat& source, const cv::Mat& source_mask, int64 timestamp_usec,
+    const cv::Mat& source, const cv::Mat& source_mask, int64_t timestamp_usec,
     const Homography& initial_transform) {
   VLOG(1) << "Processing frame " << frame_num_ << " at " << timestamp_usec;
   MEASURE_TIME << "AddImageAndTrack";
@@ -1622,12 +1622,12 @@ inline void SetMaskNeighborhood(int mask_x, int mask_y, cv::Mat* mask) {
 
   if (!add) {
     for (int i = mask_start_y; i <= mask_end_y; ++i) {
-      uint8* mask_ptr = mask->ptr(i) + mask_start_x;
+      uint8_t* mask_ptr = mask->ptr(i) + mask_start_x;
       memset(mask_ptr, K, mask_dx * sizeof(*mask_ptr));
     }
   } else {
     for (int i = mask_start_y; i <= mask_end_y; ++i) {
-      uint8* mask_ptr = mask->ptr(i);
+      uint8_t* mask_ptr = mask->ptr(i);
       for (int j = mask_start_x; j <= mask_end_x; ++j) {
         mask_ptr[j] = (mask_ptr[j] & 0x7F) + K;  // Limit to 128.
       }
@@ -1764,7 +1764,7 @@ void RegionFlowComputation::AdaptiveGoodFeaturesToTrack(
     const int mask_y = corner_y * mask_scale;
 
     // Test if neighboring element is already set.
-    if (mask->at<uint8>(mask_y, mask_x) >= 1) {
+    if (mask->at<uint8_t>(mask_y, mask_x) >= 1) {
      continue;
     }
 
@@ -1829,8 +1829,8 @@ void RegionFlowComputation::AdaptiveGoodFeaturesToTrack(
    }
 
     // Map corner pointer to x and y location.
-    const int offset = reinterpret_cast<const uint8*>(corner_ptr) -
-                       eig_image->ptr<uint8>(0);
+    const int offset = reinterpret_cast<const uint8_t*>(corner_ptr) -
+                       eig_image->ptr<uint8_t>(0);
     const int corner_y = offset / eig_image->step[0];
     const int corner_x =
@@ -1846,7 +1846,7 @@ void RegionFlowComputation::AdaptiveGoodFeaturesToTrack(
     const int mask_y = corner_y * mask_scale;
 
     // Test if neighboring element is already set.
-    if (mask->at<uint8>(mask_y, mask_x) >= 1) {
+    if (mask->at<uint8_t>(mask_y, mask_x) >= 1) {
       continue;
     }
 
@@ -2208,7 +2208,7 @@ void RegionFlowComputation::RemoveFeaturesOutsideMask(FrameTrackingData* data) {
   for (int k = data->features.size() - 1; k >= 0; --k) {
     const int x = static_cast<int>(data->features[k].x + 0.5);
     const int y = static_cast<int>(data->features[k].y + 0.5);
-    if (data->mask.at<uint8>(y, x) == 0) {
+    if (data->mask.at<uint8_t>(y, x) == 0) {
       data->RemoveFeature(k);
     }
   }
@@ -2290,7 +2290,7 @@ void RegionFlowComputation::ExtractFeatures(
   if (!data->mask.empty()) {
     cv::resize(data->mask, mask, mask.size(), 0, 0, cv::INTER_NEAREST);
     for (int y = 0; y < mask.rows; ++y) {
-      uint8* mask_ptr = mask.ptr(y);
+      uint8_t* mask_ptr = mask.ptr(y);
       for (int x = 0; x < mask.cols; ++x) {
         mask_ptr[x] = mask_ptr[x] == 0 ? 1 : 0;
       }
@@ -2403,7 +2403,7 @@ void RegionFlowComputation::ExtractFeatures(
       // to "join", without having to explicitly represent this.
       // Value of 2 improves number of connected features.
       constexpr int kMaxFeaturesPerBin = 1;
-      if (mask.at<uint8>(mask_y, mask_x) >= kMaxFeaturesPerBin) {
+      if (mask.at<uint8_t>(mask_y, mask_x) >= kMaxFeaturesPerBin) {
        data->actively_discarded_tracked_ids.push_back(track_id);
         continue;
       }
diff --git a/mediapipe/util/tracking/tone_estimation.cc b/mediapipe/util/tracking/tone_estimation.cc
index 02da9cb82..587fe96f2 100644
--- a/mediapipe/util/tracking/tone_estimation.cc
+++ b/mediapipe/util/tracking/tone_estimation.cc
@@ -154,8 +154,8 @@ void ToneEstimation::IntensityPercentiles(const cv::Mat& frame,
   std::vector<float> histogram(256, 0.0f);
 
   for (int i = 0; i < intensity.rows; ++i) {
-    const uint8* intensity_ptr = intensity.ptr(i);
-    const uint8* clip_ptr = clip_mask.ptr(i);
+    const uint8_t* intensity_ptr = intensity.ptr(i);
+    const uint8_t* clip_ptr = clip_mask.ptr(i);
 
     for (int j = 0; j < intensity.cols; ++j) {
       if (!clip_ptr[j]) {
diff --git a/mediapipe/util/tracking/tone_models.cc b/mediapipe/util/tracking/tone_models.cc
index ab98bfbaf..9410834bd 100644
--- a/mediapipe/util/tracking/tone_models.cc
+++ b/mediapipe/util/tracking/tone_models.cc
@@ -64,8 +64,8 @@ void ToneModelMethods::MapImage(const Model& model,
   const float inv_norm_scale = 1.0f / norm_scale;
 
   for (int i = 0; i < input.rows; ++i) {
-    const uint8* input_ptr = input.ptr(i);
-    uint8* output_ptr = output->ptr(i);
+    const uint8_t* input_ptr = input.ptr(i);
+    uint8_t* output_ptr = output->ptr(i);
     for (int j = 0; j < input.cols;
          ++j, input_ptr += 3, output_ptr += out_channels) {
       Vector3_f color_vec(input_ptr[0], input_ptr[1], input_ptr[2]);
diff --git a/mediapipe/util/tracking/tracked_detection_manager.cc b/mediapipe/util/tracking/tracked_detection_manager.cc
index a9e348ceb..7da207682 100644
--- a/mediapipe/util/tracking/tracked_detection_manager.cc
+++ b/mediapipe/util/tracking/tracked_detection_manager.cc
@@ -48,7 +48,7 @@ std::vector<int> TrackedDetectionManager::AddDetection(
     std::unique_ptr<TrackedDetection> detection) {
   std::vector<int> ids_to_remove;
 
-  int64 latest_duplicate_timestamp = 0;
+  int64_t latest_duplicate_timestamp = 0;
   // TODO: All detections should be fastforwarded to the current
   // timestamp before adding the detection manager. E.g. only check they are the
   // same if the timestamp are the same.
@@ -86,7 +86,7 @@ std::vector<int> TrackedDetectionManager::AddDetection(
 }
 
 std::vector<int> TrackedDetectionManager::UpdateDetectionLocation(
-    int id, const NormalizedRect& bounding_box, int64 timestamp) {
+    int id, const NormalizedRect& bounding_box, int64_t timestamp) {
   // TODO: Remove all boxes that are not updating.
   auto detection_ptr = detections_.find(id);
   if (detection_ptr == detections_.end()) {
@@ -104,7 +104,7 @@ std::vector<int> TrackedDetectionManager::UpdateDetectionLocation(
 }
 
 std::vector<int> TrackedDetectionManager::RemoveObsoleteDetections(
-    int64 timestamp) {
+    int64_t timestamp) {
   std::vector<int> ids_to_remove;
   for (auto& existing_detection : detections_) {
     if (existing_detection.second->last_updated_timestamp() < timestamp) {
diff --git a/mediapipe/util/tracking/tracking.cc b/mediapipe/util/tracking/tracking.cc
index 88ba39807..50aaa940c 100644
--- a/mediapipe/util/tracking/tracking.cc
+++ b/mediapipe/util/tracking/tracking.cc
@@ -1845,8 +1845,8 @@ void MotionBox::TranslationIrlsInitialization(
 
   // Bool indicator which features agree with model in each round.
   // In case no RANSAC rounds are performed considered all features inliers.
-  std::vector<uint8> best_features(num_features, 1);
-  std::vector<uint8> curr_features(num_features);
+  std::vector<uint8_t> best_features(num_features, 1);
+  std::vector<uint8_t> curr_features(num_features);
   float best_sum = 0;
 
   unsigned int seed = 900913;
@@ -1867,7 +1867,7 @@ void MotionBox::TranslationIrlsInitialization(
     for (int i = 0; i < num_features; ++i) {
       const Vector2_f diff = vectors[i]->object - flow;
       const float error = ErrorDiff(diff, error_system);
-      curr_features[i] = static_cast<uint8>(error < sq_cutoff);
+      curr_features[i] = static_cast<uint8_t>(error < sq_cutoff);
       if (curr_features[i]) {
         curr_sum += (*weights)[i];
       }
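
The single pattern applied throughout the patch is the migration from legacy width typedefs (int64, int32, int16, int8, uint8) to the standard fixed-width aliases from <cstdint>. The standard aliases guarantee the same width on every platform, which the older project-local typedefs only provided by convention. A minimal, self-contained sketch of the before/after usage; TimestampMsec and the sample values are hypothetical, not identifiers from this patch:

// Sketch only: illustrates the int64 -> int64_t / uint8 -> uint8_t migration.
#include <cstdint>
#include <vector>

// Before: int64 / uint8 typedefs from an internal integral_types header.
// After:  int64_t / uint8_t from <cstdint>; values and behavior are unchanged.
int64_t TimestampMsec(int64_t timestamp_usec) {
  return timestamp_usec / 1000;  // usec -> msec, the conversion box_tracker.cc performs
}

int main() {
  std::vector<uint8_t> row_idx = {3, 1, 4};  // was std::vector<uint8>
  return (TimestampMsec(1234567) == 1234 && row_idx.size() == 3) ? 0 : 1;
}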