[CPU] Fix readability-else-after-return clang-tidy remarks (#28969)
### Details:
 - Fix "readability-else-after-return" remarks reported by clang-tidy
- Enable "readability-else-after-return" clang-tidy checks on CI by
default
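
A minimal sketch of what the check flags, using a hypothetical `sign` helper for illustration (not code from this PR):

```cpp
// Before: the `else` is redundant because the `if` branch always returns.
int sign_before(int x) {
    if (x < 0) {
        return -1;
    } else {  // flagged by readability-else-after-return
        return 1;
    }
}

// After: early return; the redundant else and one level of nesting are gone.
int sign_after(int x) {
    if (x < 0) {
        return -1;
    }
    return 1;
}
```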

### Tickets:
 - N/A
aobolensk authored Feb 27, 2025
1 parent 8db08c1 commit e570253
Showing 73 changed files with 593 additions and 634 deletions.
1 change: 1 addition & 0 deletions src/plugins/intel_cpu/src/.clang-tidy
@@ -38,6 +38,7 @@ Checks: >
     modernize-loop-convert,
     modernize-pass-by-value,
     cppcoreguidelines-prefer-member-initializer,
+    readability-else-after-return,
     -bugprone-easily-swappable-parameters,
     -bugprone-implicit-widening-of-multiplication-result,
     -bugprone-narrowing-conversions,
69 changes: 46 additions & 23 deletions src/plugins/intel_cpu/src/compiled_model.cpp
@@ -280,65 +280,88 @@ ov::Any CompiledModel::get_property(const std::string& name) const {
         // @todo Does not seem ok to 'dump()' the whole graph everytime in order to get a name
         const std::string modelName = graph.dump()->get_friendly_name();
         return decltype(ov::model_name)::value_type(modelName);
-    } else if (name == ov::optimal_number_of_infer_requests) {
+    }
+    if (name == ov::optimal_number_of_infer_requests) {
         const auto streams = config.streamExecutorConfig.get_streams();
         return static_cast<decltype(ov::optimal_number_of_infer_requests)::value_type>(
             streams > 0 ? streams : 1);  // ov::optimal_number_of_infer_requests has no negative values
-    } else if (name == ov::num_streams) {
+    }
+    if (name == ov::num_streams) {
         const auto streams = config.streamExecutorConfig.get_streams();
         return decltype(ov::num_streams)::value_type(
             streams);  // ov::num_streams has special negative values (AUTO = -1, NUMA = -2)
-    } else if (name == ov::inference_num_threads) {
+    }
+    if (name == ov::inference_num_threads) {
         const auto num_threads = config.streamExecutorConfig.get_threads();
         return static_cast<decltype(ov::inference_num_threads)::value_type>(num_threads);
-    } else if (name == ov::enable_profiling.name()) {
+    }
+    if (name == ov::enable_profiling.name()) {
         const bool perfCount = config.collectPerfCounters;
         return static_cast<decltype(ov::enable_profiling)::value_type>(perfCount);
-    } else if (name == ov::hint::inference_precision) {
+    }
+    if (name == ov::hint::inference_precision) {
         return decltype(ov::hint::inference_precision)::value_type(config.inferencePrecision);
-    } else if (name == ov::hint::performance_mode) {
+    }
+    if (name == ov::hint::performance_mode) {
         return static_cast<decltype(ov::hint::performance_mode)::value_type>(config.hintPerfMode);
-    } else if (name == ov::log::level) {
+    }
+    if (name == ov::log::level) {
         return static_cast<decltype(ov::log::level)::value_type>(config.logLevel);
-    } else if (name == ov::hint::enable_cpu_pinning.name()) {
+    }
+    if (name == ov::hint::enable_cpu_pinning.name()) {
         const bool use_pin = config.enableCpuPinning;
         return static_cast<decltype(ov::hint::enable_cpu_pinning)::value_type>(use_pin);
-    } else if (name == ov::hint::enable_cpu_reservation.name()) {
+    }
+    if (name == ov::hint::enable_cpu_reservation.name()) {
         const bool use_reserve = config.enableCpuReservation;
         return static_cast<decltype(ov::hint::enable_cpu_reservation)::value_type>(use_reserve);
-    } else if (name == ov::hint::scheduling_core_type) {
+    }
+    if (name == ov::hint::scheduling_core_type) {
         const auto stream_mode = config.schedulingCoreType;
         return stream_mode;
-    } else if (name == ov::hint::model_distribution_policy) {
+    }
+    if (name == ov::hint::model_distribution_policy) {
         const auto& distribution_policy = config.modelDistributionPolicy;
         return distribution_policy;
-    } else if (name == ov::hint::enable_hyper_threading.name()) {
+    }
+    if (name == ov::hint::enable_hyper_threading.name()) {
         const bool use_ht = config.enableHyperThreading;
         return static_cast<decltype(ov::hint::enable_hyper_threading)::value_type>(use_ht);
-    } else if (name == ov::hint::execution_mode) {
+    }
+    if (name == ov::hint::execution_mode) {
         return config.executionMode;
-    } else if (name == ov::hint::num_requests) {
+    }
+    if (name == ov::hint::num_requests) {
         return static_cast<decltype(ov::hint::num_requests)::value_type>(config.hintNumRequests);
-    } else if (name == ov::execution_devices) {
+    }
+    if (name == ov::execution_devices) {
         return decltype(ov::execution_devices)::value_type{m_plugin->get_device_name()};
-    } else if (name == ov::intel_cpu::denormals_optimization) {
+    }
+    if (name == ov::intel_cpu::denormals_optimization) {
         return static_cast<decltype(ov::intel_cpu::denormals_optimization)::value_type>(
             config.denormalsOptMode == Config::DenormalsOptMode::DO_On);
-    } else if (name == ov::intel_cpu::sparse_weights_decompression_rate) {
+    }
+    if (name == ov::intel_cpu::sparse_weights_decompression_rate) {
         return static_cast<decltype(ov::intel_cpu::sparse_weights_decompression_rate)::value_type>(
             config.fcSparseWeiDecompressionRate);
-    } else if (name == ov::hint::dynamic_quantization_group_size) {
+    }
+    if (name == ov::hint::dynamic_quantization_group_size) {
         return static_cast<decltype(ov::hint::dynamic_quantization_group_size)::value_type>(
             config.fcDynamicQuantizationGroupSize);
-    } else if (name == ov::hint::kv_cache_precision) {
+    }
+    if (name == ov::hint::kv_cache_precision) {
         return decltype(ov::hint::kv_cache_precision)::value_type(config.kvCachePrecision);
-    } else if (name == ov::key_cache_precision) {
+    }
+    if (name == ov::key_cache_precision) {
         return decltype(ov::key_cache_precision)::value_type(config.keyCachePrecision);
-    } else if (name == ov::value_cache_precision) {
+    }
+    if (name == ov::value_cache_precision) {
         return decltype(ov::value_cache_precision)::value_type(config.valueCachePrecision);
-    } else if (name == ov::key_cache_group_size) {
+    }
+    if (name == ov::key_cache_group_size) {
         return static_cast<decltype(ov::key_cache_group_size)::value_type>(config.keyCacheGroupSize);
-    } else if (name == ov::value_cache_group_size) {
+    }
+    if (name == ov::value_cache_group_size) {
         return static_cast<decltype(ov::value_cache_group_size)::value_type>(config.valueCacheGroupSize);
     }
     OPENVINO_THROW("Unsupported property: ", name);
31 changes: 14 additions & 17 deletions src/plugins/intel_cpu/src/cpu_streams_calculation.cpp
@@ -101,11 +101,10 @@ std::vector<std::vector<int>> get_streams_info_table(
                 streams_info_table.push_back(stream_info);
                 total_threads -= stream_info[THREADS_PER_STREAM];
                 return;
-            } else {
-                stream_info[THREADS_PER_STREAM] = one_proc_table[index][n];
-                streams_info_table.push_back(stream_info);
-                total_threads -= stream_info[THREADS_PER_STREAM];
-            }
+            }
+            stream_info[THREADS_PER_STREAM] = one_proc_table[index][n];
+            streams_info_table.push_back(stream_info);
+            total_threads -= stream_info[THREADS_PER_STREAM];
         }
     }
 }
@@ -172,14 +171,13 @@ std::vector<std::vector<int>> get_streams_info_table(
             }
             if (count >= n_streams) {
                 return;
-            } else {
-                count = 0;
-                if (n_threads_per_stream > 1) {
-                    n_threads_per_stream--;
-                } else {
-                    n_streams = n_threads;
-                    return;
-                }
-            }
+            }
+            count = 0;
+            if (n_threads_per_stream > 1) {
+                n_threads_per_stream--;
+            } else {
+                n_streams = n_threads;
+                return;
+            }
         }
     };
@@ -381,13 +379,12 @@ std::vector<std::vector<int>> get_streams_info_table(
                 }
                 if (stream_info[STREAM_SOCKET_ID] == row[PROC_SOCKET_ID]) {
                     continue;
-                } else {
-                    stream_info[THREADS_PER_STREAM] = std::min(stream_info[THREADS_PER_STREAM], row[ALL_PROC]);
-                    create_one_stream(row,
-                                      proc_type_table,
-                                      stream_info[THREADS_PER_STREAM],
-                                      IStreamsExecutor::Config::StreamsMode::SUB_STREAMS_FOR_SOCKET);
                 }
+                stream_info[THREADS_PER_STREAM] = std::min(stream_info[THREADS_PER_STREAM], row[ALL_PROC]);
+                create_one_stream(row,
+                                  proc_type_table,
+                                  stream_info[THREADS_PER_STREAM],
+                                  IStreamsExecutor::Config::StreamsMode::SUB_STREAMS_FOR_SOCKET);
             }
             stream_info = streams_info_table[0];
             stream_info[NUMBER_OF_STREAMS] = 1;
3 changes: 1 addition & 2 deletions src/plugins/intel_cpu/src/cpu_types.cpp
@@ -270,9 +270,8 @@ Type TypeFromName(const std::string& type) {
     auto itType = type_to_name_tbl.find(type);
     if (type_to_name_tbl.end() != itType) {
         return itType->second;
-    } else {
-        return Type::Unknown;
     }
+    return Type::Unknown;
 }
 
 std::string NameFromType(const Type type) {
6 changes: 2 additions & 4 deletions src/plugins/intel_cpu/src/dnnl_extension_utils.cpp
@@ -184,9 +184,8 @@ DnnlMemoryDescPtr DnnlExtensionUtils::makeDescriptor(const dnnl::memory::desc& d
 DnnlMemoryDescPtr DnnlExtensionUtils::makeDescriptor(const_dnnl_memory_desc_t desc) {
     if (desc->format_kind == dnnl::impl::format_kind_t::dnnl_blocked) {
         return std::shared_ptr<DnnlBlockedMemoryDesc>(new DnnlBlockedMemoryDesc(desc));
-    } else {
-        return std::shared_ptr<DnnlMemoryDesc>(new DnnlMemoryDesc(desc));
     }
+    return std::shared_ptr<DnnlMemoryDesc>(new DnnlMemoryDesc(desc));
 }
 
 size_t DnnlExtensionUtils::getMemSizeForDnnlDesc(const dnnl::memory::desc& desc) {
@@ -205,9 +204,8 @@ std::shared_ptr<DnnlBlockedMemoryDesc> DnnlExtensionUtils::makeUndefinedDesc(con
                                                                               const Shape& shape) {
     if (desc.get_format_kind() == memory::format_kind::blocked) {
         return std::shared_ptr<DnnlBlockedMemoryDesc>(new DnnlBlockedMemoryDesc(desc, shape));
-    } else {
-        OPENVINO_THROW("Unexpected: Cannot make undefined descriptor. Only dnnl_blocked type is allowed.");
     }
+    OPENVINO_THROW("Unexpected: Cannot make undefined descriptor. Only dnnl_blocked type is allowed.");
 }
 
 DnnlMemoryDescPtr DnnlExtensionUtils::query_md(const const_dnnl_primitive_desc_t& pd,
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/dnnl_postops_composer.cpp
@@ -577,9 +577,9 @@ bool DnnlPostOpsComposer::appendLinear(const std::vector<float>& scale,
     if (scale.size() == 1 && shift.size() == 1) {
         if (shift[0] == 0.0f) {
             return appendScale(scale, isLastPostOp, allowBinary);
-        } else {
-            appendEltwise(dnnl::algorithm::eltwise_linear, scale[0], shift[0]);
         }
+        appendEltwise(dnnl::algorithm::eltwise_linear, scale[0], shift[0]);
+
     } else {
         // return before committing any changes
         if (!allowBinary && shift.size() > 1) {
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/dnnl_postops_composer_legacy.cpp
@@ -217,9 +217,9 @@ bool DnnlPostOpsComposerLegacy::appendLinear(const std::vector<float>& scale,
     if (scale.size() == 1 && shift.size() == 1) {
         if (shift[0] == 0.0f) {
             return appendScale(scale, isLastPostOp, allowBinary);
-        } else {
-            appendEltwise(dnnl::algorithm::eltwise_linear, scale[0], shift[0]);
         }
+        appendEltwise(dnnl::algorithm::eltwise_linear, scale[0], shift[0]);
+
     } else {
         // return before committing any changes
         if (!allowBinary && shift.size() > 1) {
3 changes: 2 additions & 1 deletion src/plugins/intel_cpu/src/edge.cpp
@@ -570,7 +570,8 @@ EdgePtr Edge::getBaseEdge(int look) {
             }
         }
         return next_ch_edge;
-    } else if (parentInPlacePort >= 0 && (look & LOOK_UP)) {
+    }
+    if (parentInPlacePort >= 0 && (look & LOOK_UP)) {
         return getParent()->getParentEdgeAt(parentInPlacePort);
     }
 
@@ -2557,11 +2557,12 @@ std::set<std::vector<element::Type>> jit_select_emitter::get_supported_precision
 size_t jit_select_emitter::aux_vecs_count() const {
     if (host_isa_ == x64::avx512_core) {
         return 0;
-    } else if (host_isa_ == x64::avx2) {  // tmp vec for mask
+    }
+    if (host_isa_ == x64::avx2) {  // tmp vec for mask
         return 1;
-    } else {  // mask should be xmm0 on sse41 + tmp vec for mask
-        return 2;
     }
+    // mask should be xmm0 on sse41 + tmp vec for mask
+    return 2;
 }
 
 void jit_select_emitter::emit_impl(const std::vector<size_t>& in_vec_idxs,
@@ -312,9 +312,8 @@ ov::snippets::RegType CPUGenerator::get_specific_op_out_reg_type(const ov::Outpu
     const auto op = out.get_node_shared_ptr();
     if (ov::as_type_ptr<intel_cpu::FusedMulAdd>(op) || ov::as_type_ptr<intel_cpu::SwishNode>(op)) {
         return ov::snippets::RegType::vec;
-    } else {
-        return ov::snippets::RegType::undefined;
     }
+    return ov::snippets::RegType::undefined;
 }
 
 bool CPUGenerator::uses_precompiled_kernel(const std::shared_ptr<snippets::Emitter>& e) const {
@@ -101,9 +101,8 @@ static bool is_segfault_detector_emitter(const intel_cpu::jit_emitter* emitter)
             return std::make_shared<jit_debug_emitter>(emitter, \
                                                        segfault_emitter, \
                                                        jit_debug_emitter::EmissionLocation::preamble); \
-        } else { \
-            return emitter; \
-        } \
+        } \
+        return emitter; \
     }, \
     [](const std::shared_ptr<ov::Node>& n) -> std::set<std::vector<element::Type>> { \
         return e_type::get_supported_precisions(n); \
@@ -439,11 +438,11 @@ ov::snippets::RegType intel_cpu::CPUGenerator::get_specific_op_out_reg_type(cons
 #endif
         is_type<intel_cpu::BrgemmCopyB>(op)) {
         return ov::snippets::RegType::gpr;
-    } else if (is_type<intel_cpu::FusedMulAdd>(op) || is_type<intel_cpu::SwishNode>(op)) {
+    }
+    if (is_type<intel_cpu::FusedMulAdd>(op) || is_type<intel_cpu::SwishNode>(op)) {
         return ov::snippets::RegType::vec;
-    } else {
-        return ov::snippets::RegType::undefined;
     }
+    return ov::snippets::RegType::undefined;
 }
 
 bool intel_cpu::CPUGenerator::uses_precompiled_kernel(const std::shared_ptr<snippets::Emitter>& e) const {
@@ -65,25 +65,28 @@ std::set<std::vector<element::Type>> jit_brgemm_emitter::get_supported_precision
     const auto brgemm = as_type_ptr<ov::intel_cpu::BrgemmCPU>(node);
     OV_CPU_JIT_EMITTER_ASSERT(brgemm, "get_supported_precisions() expects BrgemmCPU node");
     using brgemm_utils::BRGEMM_TYPE;
-    if (brgemm->get_type() == BRGEMM_TYPE::STAND_ALONE) {
+    switch (brgemm->get_type()) {
+    case BRGEMM_TYPE::STAND_ALONE:
         return {{element::f32, element::f32}};
-    } else if (brgemm->get_type() == BRGEMM_TYPE::REPACKING_ONLY) {
+    case BRGEMM_TYPE::REPACKING_ONLY: {
         std::set<std::vector<element::Type>> supported_types = {{element::u8, element::i8},
                                                                 {element::bf16, element::bf16},
                                                                 {element::f32, element::f32}};
         if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2_vnni_2)) {
            supported_types.insert({element::i8, element::i8});
         }
         return supported_types;
-    } else if (brgemm->get_type() == BRGEMM_TYPE::WITH_COMPENSATIONS) {
+    }
+    case BRGEMM_TYPE::WITH_COMPENSATIONS:
         return {{element::i8, element::i8, element::f32}};
-    } else if (brgemm->get_type() == BRGEMM_TYPE::WITH_AMX) {
+    case BRGEMM_TYPE::WITH_AMX:
         return {{element::i8, element::i8, element::u8},
                 {element::u8, element::i8, element::u8},
                 {element::bf16, element::bf16, element::u8},
                 {element::f16, element::f16, element::u8}};
+    default:
+        OV_CPU_JIT_EMITTER_THROW("got BrgemmCPU node with unsupported type");
     }
-    OV_CPU_JIT_EMITTER_THROW("got BrgemmCPU node with unsupported type");
 }
 
 void jit_brgemm_emitter::validate_arguments(const std::vector<size_t>& in, const std::vector<size_t>& out) const {
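Note that where the old else-after-return chain repeatedly compared the same enum value, as in `get_supported_precisions()` above, the commit rewrites it as a `switch` rather than a string of early-return `if`s. A minimal sketch of that variant, using a hypothetical `Mode` enum (not code from this PR):

```cpp
#include <string>

enum class Mode { Fast, Accurate };

// Dispatch on one enum value: a switch states the alternatives once and
// avoids the redundant `else` branches that the clang-tidy check flags.
std::string describe(Mode m) {
    switch (m) {
    case Mode::Fast:
        return "fast";
    case Mode::Accurate:
        return "accurate";
    }
    return "unknown";  // defensive fallback for out-of-range values
}
```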
9 changes: 3 additions & 6 deletions src/plugins/intel_cpu/src/graph_optimizer.cpp
@@ -325,9 +325,8 @@ void GraphOptimizer::FuseConvolutionMatMulDeconvAndBias(Graph& graph) {
 
         if (!deconv) {
             return (one_of(node->getType(), Type::Convolution, Type::MatMul) && node->getParentEdges().size() == 2);
-        } else {
-            return deconv->canFuseBias();
         }
+        return deconv->canFuseBias();
     };
 
     auto isSuitableChildNode = [&](const NodePtr& parentNode, const NodePtr& childNode) {
@@ -564,9 +563,8 @@ void GraphOptimizer::FuseMultiplyAndAdd(Graph& graph) {
             if (dims[i] != 1) {
                 if (channelAxis != -1) {  // more than one axis is != 1
                     return -1;
-                } else {
-                    channelAxis = i;
                 }
+                channelAxis = i;
             }
         }
         return channelAxis;
@@ -3154,9 +3152,8 @@ void GraphOptimizer::MatchSdpaKvCache(Graph& graph) {
             sdpa = std::dynamic_pointer_cast<ScaledDotProductAttention>(child);
             if (sdpa) {
                 break;
-            } else {
-                OPENVINO_THROW("Couldn't cast node", child->getName(), " to ScaledDotProductAttention type");
             }
+            OPENVINO_THROW("Couldn't cast node", child->getName(), " to ScaledDotProductAttention type");
         }
     }
 
3 changes: 1 addition & 2 deletions src/plugins/intel_cpu/src/infer_request.cpp
@@ -334,9 +334,8 @@ const ov::Output<const ov::Node>& SyncInferRequest::get_internal_port(const ov::
     OPENVINO_ASSERT(port_find.found(), "Can not find port: ", port.get_any_name());
     if (port_find.is_input()) {
         return m_input_ports_map.at(port_find.idx);
-    } else {
-        return m_output_ports_map.at(port_find.idx);
     }
+    return m_output_ports_map.at(port_find.idx);
 }
 
 void SyncInferRequest::set_tensor(const ov::Output<const ov::Node>& in_port, const ov::SoPtr<ov::ITensor>& in_tensor) {