Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions ci/Dockerfile.micro
Original file line number Diff line number Diff line change
Expand Up @@ -37,12 +37,12 @@ RUN apt-get install -y zip xxd sudo
RUN apt install -y lsb-release wget software-properties-common gnupg
RUN wget https://apt.llvm.org/llvm.sh
RUN chmod +x llvm.sh
RUN ./llvm.sh 16
RUN ln -s /usr/bin/clang-16 /usr/bin/clang
RUN ln -s /usr/bin/clang++-16 /usr/bin/clang++
RUN ./llvm.sh 21
RUN ln -s /usr/bin/clang-21 /usr/bin/clang
RUN ln -s /usr/bin/clang++-21 /usr/bin/clang++

RUN apt-get install clang-format-16
RUN ln -s /usr/bin/clang-format-16 /usr/bin/clang-format
RUN apt-get install clang-format-21
RUN ln -s /usr/bin/clang-format-21 /usr/bin/clang-format

# Needed when using the Dockerfile locally.
RUN git config --global --add safe.directory /opt/tflm
Expand Down
34 changes: 20 additions & 14 deletions tensorflow/lite/experimental/microfrontend/lib/filterbank_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -76,8 +76,9 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckChannelFrequencyStarts) {
kSampleRate, kSpectrumSize));

const int16_t expected[] = {0, 4, 8};
TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
TF_LITE_MICRO_EXPECT_EQ(
state.num_channels + 1,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
int i;
for (i = 0; i <= state.num_channels; ++i) {
TF_LITE_MICRO_EXPECT_EQ(state.channel_frequency_starts[i], expected[i]);
Expand All @@ -93,8 +94,9 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckChannelWeightStarts) {
kSampleRate, kSpectrumSize));

const int16_t expected[] = {0, 8, 16};
TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
TF_LITE_MICRO_EXPECT_EQ(
state.num_channels + 1,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
int i;
for (i = 0; i <= state.num_channels; ++i) {
TF_LITE_MICRO_EXPECT_EQ(state.channel_weight_starts[i], expected[i]);
Expand All @@ -110,8 +112,9 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckChannelWidths) {
kSampleRate, kSpectrumSize));

const int16_t expected[] = {8, 8, 8};
TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
TF_LITE_MICRO_EXPECT_EQ(
state.num_channels + 1,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
int i;
for (i = 0; i <= state.num_channels; ++i) {
TF_LITE_MICRO_EXPECT_EQ(state.channel_widths[i], expected[i]);
Expand All @@ -129,9 +132,10 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckWeights) {
const int16_t expected[] = {0, 3277, 2217, 1200, 222, 0, 0, 0,
0, 3376, 2468, 1591, 744, 0, 0, 0,
0, 4020, 3226, 2456, 1708, 983, 277, 0};
TF_LITE_MICRO_EXPECT_EQ(state.channel_weight_starts[state.num_channels] +
state.channel_widths[state.num_channels],
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
TF_LITE_MICRO_EXPECT_EQ(
state.channel_weight_starts[state.num_channels] +
state.channel_widths[state.num_channels],
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
for (size_t i = 0; i < sizeof(expected) / sizeof(expected[0]); ++i) {
TF_LITE_MICRO_EXPECT_EQ(state.weights[i], expected[i]);
}
Expand All @@ -148,9 +152,10 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckUnweights) {
const int16_t expected[] = {0, 819, 1879, 2896, 3874, 0, 0, 0,
0, 720, 1628, 2505, 3352, 0, 0, 0,
0, 76, 870, 1640, 2388, 3113, 3819, 0};
TF_LITE_MICRO_EXPECT_EQ(state.channel_weight_starts[state.num_channels] +
state.channel_widths[state.num_channels],
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
TF_LITE_MICRO_EXPECT_EQ(
state.channel_weight_starts[state.num_channels] +
state.channel_widths[state.num_channels],
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
for (size_t i = 0; i < sizeof(expected) / sizeof(expected[0]); ++i) {
TF_LITE_MICRO_EXPECT_EQ(state.unweights[i], expected[i]);
}
Expand Down Expand Up @@ -204,8 +209,9 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckSqrt) {
uint32_t* scaled_filterbank = FilterbankSqrt(&state, kScaleShift);

const uint32_t expected[] = {247311, 508620};
TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
TF_LITE_MICRO_EXPECT_EQ(
state.num_channels,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
int i;
for (i = 0; i < state.num_channels; ++i) {
TF_LITE_MICRO_EXPECT_EQ(scaled_filterbank[i], expected[i]);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,9 @@ TF_LITE_MICRO_TEST(NoiseReductionTest_TestNoiseReductionEstimate) {
NoiseReductionApply(&state, signal);

const uint32_t expected[] = {6321887, 31248341};
TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
TF_LITE_MICRO_EXPECT_EQ(
state.num_channels,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
int i;
for (i = 0; i < state.num_channels; ++i) {
TF_LITE_MICRO_EXPECT_EQ(state.estimate[i], expected[i]);
Expand All @@ -68,8 +69,9 @@ TF_LITE_MICRO_TEST(NoiseReductionTest_TestNoiseReduction) {
NoiseReductionApply(&state, signal);

const uint32_t expected[] = {241137, 478104};
TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
TF_LITE_MICRO_EXPECT_EQ(
state.num_channels,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
int i;
for (i = 0; i < state.num_channels; ++i) {
TF_LITE_MICRO_EXPECT_EQ(signal[i], expected[i]);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,8 +52,9 @@ TF_LITE_MICRO_TEST(PcanGainControlTest_TestPcanGainControl) {
PcanGainControlApply(&state, signal);

const uint32_t expected[] = {3578, 1533};
TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
TF_LITE_MICRO_EXPECT_EQ(
state.num_channels,
static_cast<int>(sizeof(expected) / sizeof(expected[0])));
int i;
for (i = 0; i < state.num_channels; ++i) {
TF_LITE_MICRO_EXPECT_EQ(signal[i], expected[i]);
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/lite/micro/cortex_m_corstone_300/system_setup.cc
Original file line number Diff line number Diff line change
Expand Up @@ -104,8 +104,8 @@ uint32_t GetCurrentTimeTicks() {

#ifdef ETHOS_U
#if defined(ETHOSU_FAST_MEMORY_SIZE) && ETHOSU_FAST_MEMORY_SIZE > 0
__attribute__((aligned(16), section(".bss.ethosu_scratch")))
uint8_t ethosu0_scratch[ETHOSU_FAST_MEMORY_SIZE];
__attribute__((aligned(16), section(".bss.ethosu_scratch"))) uint8_t
ethosu0_scratch[ETHOSU_FAST_MEMORY_SIZE];
#else
#define ethosu0_scratch 0
#define ETHOSU_FAST_MEMORY_SIZE 0
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@ typedef mli_status (*conv_func_ptr)(const mli_tensor* /*in*/,
mli_tensor* /*out*/);

#ifdef MLI_2_0
conv_func_ptr __attribute__((weak))
mli_krn_conv2d_hwcn(const mli_tensor* weights) {
conv_func_ptr
__attribute__((weak)) mli_krn_conv2d_hwcn(const mli_tensor* weights) {
int filter_w = weights->shape[KRNL_W_DIM_HWCN];
int filter_h = weights->shape[KRNL_H_DIM_HWCN];

Expand All @@ -41,8 +41,9 @@ mli_krn_conv2d_hwcn(const mli_tensor* weights) {
}
}
#else
conv_func_ptr __attribute__((weak))
mli_krn_conv2d_hwcn(const mli_tensor* weights, const mli_conv2d_cfg* cfg) {
conv_func_ptr
__attribute__((weak)) mli_krn_conv2d_hwcn(const mli_tensor* weights,
const mli_conv2d_cfg* cfg) {
return mli_krn_conv2d_nhwc_sa8_sa8_sa32;
}
#endif
Expand All @@ -55,8 +56,8 @@ typedef mli_status (*depthwise_func_ptr)(const mli_tensor* /*in*/,
mli_tensor* /*out*/);

#ifdef MLI_2_0
depthwise_func_ptr __attribute__((weak))
mli_krn_depthwise_conv2d(const mli_tensor* weights) {
depthwise_func_ptr
__attribute__((weak)) mli_krn_depthwise_conv2d(const mli_tensor* weights) {
int filter_w = weights->shape[KRNL_DW_W_DIM_HW1N];
int filter_h = weights->shape[KRNL_DW_H_DIM_HW1N];

Expand All @@ -69,15 +70,16 @@ mli_krn_depthwise_conv2d(const mli_tensor* weights) {
}
}
#else
depthwise_func_ptr __attribute__((weak))
mli_krn_depthwise_conv2d(const mli_tensor* weights, const mli_conv2d_cfg* cfg) {
depthwise_func_ptr
__attribute__((weak)) mli_krn_depthwise_conv2d(const mli_tensor* weights,
const mli_conv2d_cfg* cfg) {
return mli_krn_depthwise_conv2d_hwcn_sa8_sa8_sa32;
}
#endif

#ifdef MLI_2_0
depthwise_func_ptr __attribute__((weak))
mli_krn_group_conv2d(const mli_tensor* weights) {
depthwise_func_ptr
__attribute__((weak)) mli_krn_group_conv2d(const mli_tensor* weights) {
int filter_w = weights->shape[KRNL_DW_W_DIM_HW1N];
int filter_h = weights->shape[KRNL_DW_H_DIM_HW1N];

Expand All @@ -97,8 +99,8 @@ typedef mli_status (*pooling_func_ptr)(const mli_tensor* /*in*/,
mli_tensor* /*out*/);

#ifdef MLI_2_0
pooling_func_ptr __attribute__((weak))
mli_krn_avepool(const mli_pool_cfg* cfg) {
pooling_func_ptr
__attribute__((weak)) mli_krn_avepool(const mli_pool_cfg* cfg) {
int filter_w = cfg->kernel_width;
int filter_h = cfg->kernel_height;

Expand All @@ -111,15 +113,15 @@ mli_krn_avepool(const mli_pool_cfg* cfg) {
}
}
#else
pooling_func_ptr __attribute__((weak))
mli_krn_avepool(const mli_pool_cfg* cfg) {
pooling_func_ptr
__attribute__((weak)) mli_krn_avepool(const mli_pool_cfg* cfg) {
return mli_krn_avepool_hwc_sa8;
}
#endif

#ifdef MLI_2_0
pooling_func_ptr __attribute__((weak))
mli_krn_maxpool(const mli_pool_cfg* cfg) {
pooling_func_ptr
__attribute__((weak)) mli_krn_maxpool(const mli_pool_cfg* cfg) {
int filter_w = cfg->kernel_width;
int filter_h = cfg->kernel_height;

Expand All @@ -132,8 +134,8 @@ mli_krn_maxpool(const mli_pool_cfg* cfg) {
}
}
#else
pooling_func_ptr __attribute__((weak))
mli_krn_maxpool(const mli_pool_cfg* cfg) {
pooling_func_ptr
__attribute__((weak)) mli_krn_maxpool(const mli_pool_cfg* cfg) {
return mli_krn_maxpool_hwc_sa8;
}
#endif
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/lite/micro/kernels/arc_mli/mli_interface.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ class MliTensorInterface {
public:
// Make sure that lifetime of MliTensorInterface instance isn't bigger than
// related mli_tensor.
MliTensorInterface(mli_tensor* tensor) : tensor_(tensor){};
MliTensorInterface(mli_tensor* tensor) : tensor_(tensor) {};
MliTensorInterface() = default;
~MliTensorInterface() = default;

Expand Down
4 changes: 2 additions & 2 deletions tensorflow/lite/micro/kernels/circular_buffer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -91,8 +91,8 @@ TfLiteStatus CircularBufferEval(TfLiteContext* context, TfLiteNode* node) {
EvalInt8(tflite::micro::GetTensorData<int8_t>(input), num_slots, depth,
tflite::micro::GetTensorData<int8_t>(output));
} else {
MicroPrintf("Type %s (%d) not supported.",
TfLiteTypeGetName(input->type), input->type);
MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
input->type);
return kTfLiteError;
}

Expand Down
2 changes: 1 addition & 1 deletion tensorflow/lite/micro/kernels/reduce_common.cc
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ enum MinMaxEvalType { kEvalMin, kEvalMax };
template <typename T>
struct MinMaxReducerCompare {
MinMaxReducerCompare() = delete;
MinMaxReducerCompare(MinMaxEvalType evalType) : type_(evalType){};
MinMaxReducerCompare(MinMaxEvalType evalType) : type_(evalType) {};
Copy link
Member

@ddavis-2015 ddavis-2015 Sep 20, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I believe the Google style rules say that a space is not allowed between the parenthesis and the brace, and my IDE with its auto-formatting agrees. But the Pigweed style checker says that this is wrong and the spacing must be there. I am very interested in your final resolution of this. This difference has only appeared in the last year or so.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm sticking with the current format because it follows the rule "There should be a space between the close parenthesis and the open curly brace." (link)

Given that the Pigweed style is implemented by clang-format (following Google conventions), the practical decision is to treat the tool's output as the final authority, overriding minor written style guide disagreements.


constexpr T initialValue() const {
return (type_ == kEvalMin) ? std::numeric_limits<T>::max()
Expand Down
12 changes: 8 additions & 4 deletions tensorflow/lite/micro/kernels/resize_bilinear_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -168,8 +168,10 @@ TF_LITE_MICRO_TEST(VerticalResizeInt8) {
TF_LITE_MICRO_TEST(TwoDimensionalResize) {
int input_dims[] = {4, 1, 2, 2, 1};
const float input_data[] = {
3, 6, //
9, 12, //
3,
6, //
9,
12, //
};
const int32_t expected_size_data[] = {3, 3};
const float expected_output_data[] = {
Expand All @@ -194,8 +196,10 @@ TF_LITE_MICRO_TEST(TwoDimensionalResize) {
TF_LITE_MICRO_TEST(TwoDimensionalResizeInt8) {
int input_dims[] = {4, 1, 2, 2, 1};
const int8_t input_data[] = {
3, 6, //
9, 12, //
3,
6, //
9,
12, //
};
const int32_t expected_size_data[] = {3, 3};
const int8_t expected_output_data[] = {
Expand Down
18 changes: 12 additions & 6 deletions tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -156,8 +156,10 @@ TF_LITE_MICRO_TEST(VerticalResizeInt16) {
TF_LITE_MICRO_TEST(TwoDimensionalResize) {
int input_dims[] = {4, 1, 2, 2, 1};
const float input_data[] = {
3, 6, //
9, 12, //
3,
6, //
9,
12, //
};
const int32_t expected_size_data[] = {3, 3};
const float expected_output_data[] = {
Expand All @@ -177,8 +179,10 @@ TF_LITE_MICRO_TEST(TwoDimensionalResize) {
TF_LITE_MICRO_TEST(TwoDimensionalResizeInt8) {
int input_dims[] = {4, 1, 2, 2, 1};
const int8_t input_data[] = {
3, -6, //
9, 12, //
3,
-6, //
9,
12, //
};
const int32_t expected_size_data[] = {3, 3};
const int8_t expected_output_data[] = {
Expand All @@ -197,8 +201,10 @@ TF_LITE_MICRO_TEST(TwoDimensionalResizeInt8) {
TF_LITE_MICRO_TEST(TwoDimensionalResizeInt16) {
int input_dims[] = {4, 1, 2, 2, 1};
const int16_t input_data[] = {
3, -6, //
9, 12, //
3,
-6, //
9,
12, //
};
const int32_t expected_size_data[] = {3, 3};
const int16_t expected_output_data[] = {
Expand Down
Loading
Loading