diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index 8a4049583a..3c34e28f0a 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -959,6 +959,18 @@ task_process_status Orch::handleSaiSetStatus(sai_api_t api, sai_status_t status, sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); abort(); } + case SAI_API_TUNNEL: + switch (status) + { + case SAI_STATUS_ATTR_NOT_SUPPORTED_0: + SWSS_LOG_ERROR("Encountered SAI_STATUS_ATTR_NOT_SUPPORTED_0 in set operation, task failed, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + return task_failed; + default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + abort(); + } default: SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index e84ba315c4..065e78a0c0 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -144,7 +144,9 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), ecn_mode, tunnel_id); + SWSS_LOG_NOTICE("Skip setting ecn_mode since the SAI attribute SAI_TUNNEL_ATTR_DECAP_ECN_MODE is create only"); + valid = false; + break; } } else if (fvField(i) == "encap_ecn_mode") @@ -158,7 +160,9 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), encap_ecn_mode, tunnel_id); + SWSS_LOG_NOTICE("Skip setting encap_ecn_mode since the SAI attribute SAI_TUNNEL_ATTR_ENCAP_ECN_MODE is create only"); + valid = false; + break; } } else if (fvField(i) == "ttl_mode") @@ -582,30 +586,6 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, string value, sai_object_ sai_attribute_t attr; - if (field == "ecn_mode") - { - // 
decap ecn mode (copy from outer/standard) - attr.id = SAI_TUNNEL_ATTR_DECAP_ECN_MODE; - if (value == "copy_from_outer") - { - attr.value.s32 = SAI_TUNNEL_DECAP_ECN_MODE_COPY_FROM_OUTER; - } - else if (value == "standard") - { - attr.value.s32 = SAI_TUNNEL_DECAP_ECN_MODE_STANDARD; - } - } - - if (field == "encap_ecn_mode") - { - // encap ecn mode (only standard is supported) - attr.id = SAI_TUNNEL_ATTR_ENCAP_ECN_MODE; - if (value == "standard") - { - attr.value.s32 = SAI_TUNNEL_ENCAP_ECN_MODE_STANDARD; - } - } - if (field == "ttl_mode") { // ttl mode (uniform/pipe) diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp index c727aad70d..ab4aa9e972 100644 --- a/tests/mock_tests/qosorch_ut.cpp +++ b/tests/mock_tests/qosorch_ut.cpp @@ -25,6 +25,7 @@ namespace qosorch_test int sai_remove_scheduler_count; int sai_set_wred_attribute_count; sai_object_id_t switch_dscp_to_tc_map_id; + TunnelDecapOrch *tunnel_decap_orch; sai_remove_scheduler_fn old_remove_scheduler; sai_scheduler_api_t ut_sai_scheduler_api, *pold_sai_scheduler_api; @@ -36,6 +37,7 @@ namespace qosorch_test sai_qos_map_api_t ut_sai_qos_map_api, *pold_sai_qos_map_api; sai_set_switch_attribute_fn old_set_switch_attribute_fn; sai_switch_api_t ut_sai_switch_api, *pold_sai_switch_api; + sai_tunnel_api_t ut_sai_tunnel_api, *pold_sai_tunnel_api; typedef struct { @@ -212,6 +214,40 @@ namespace qosorch_test return rc; } + sai_status_t _ut_stub_sai_create_tunnel( + _Out_ sai_object_id_t *tunnel_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + *tunnel_id = (sai_object_id_t)(0x1); + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_create_tunnel_term_table_entry( + _Out_ sai_object_id_t *tunnel_term_table_entry_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + *tunnel_term_table_entry_id = (sai_object_id_t)(0x1); + return SAI_STATUS_SUCCESS; + } + + void 
checkTunnelAttribute(sai_attr_id_t attr) + { + ASSERT_TRUE(attr != SAI_TUNNEL_ATTR_ENCAP_ECN_MODE); + ASSERT_TRUE(attr != SAI_TUNNEL_ATTR_DECAP_ECN_MODE); + } + + sai_status_t _ut_stub_sai_set_tunnel_attribute( + _In_ sai_object_id_t tunnel_id, + _In_ const sai_attribute_t *attr) + { + checkTunnelAttribute(attr->id); + return SAI_STATUS_ATTR_NOT_SUPPORTED_0; + } + struct QosOrchTest : public ::testing::Test { QosOrchTest() @@ -292,6 +328,14 @@ namespace qosorch_test sai_switch_api = &ut_sai_switch_api; ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + // Mock tunnel API + pold_sai_tunnel_api = sai_tunnel_api; + ut_sai_tunnel_api = *pold_sai_tunnel_api; + sai_tunnel_api = &ut_sai_tunnel_api; + ut_sai_tunnel_api.set_tunnel_attribute = _ut_stub_sai_set_tunnel_attribute; + ut_sai_tunnel_api.create_tunnel = _ut_stub_sai_create_tunnel; + ut_sai_tunnel_api.create_tunnel_term_table_entry = _ut_stub_sai_create_tunnel_term_table_entry; + // Init switch and create dependencies m_app_db = make_shared<swss::DBConnector>("APPL_DB", 0); m_config_db = make_shared<swss::DBConnector>("CONFIG_DB", 0); @@ -381,6 +425,9 @@ namespace qosorch_test ASSERT_EQ(gNeighOrch, nullptr); gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + ASSERT_EQ(tunnel_decap_orch, nullptr); + tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + vector<string> qos_tables = { CFG_TC_TO_QUEUE_MAP_TABLE_NAME, CFG_SCHEDULER_TABLE_NAME, @@ -394,7 +441,8 @@ namespace qosorch_test CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, CFG_DSCP_TO_FC_MAP_TABLE_NAME, - CFG_EXP_TO_FC_MAP_TABLE_NAME + CFG_EXP_TO_FC_MAP_TABLE_NAME, + CFG_TC_TO_DSCP_MAP_TABLE_NAME }; gQosOrch = new QosOrch(m_config_db.get(), qos_tables); @@ -557,10 +605,14 @@ namespace qosorch_test delete gQosOrch; gQosOrch = nullptr; + delete tunnel_decap_orch; + tunnel_decap_orch = nullptr; + sai_qos_map_api = 
pold_sai_qos_map_api; sai_scheduler_api = pold_sai_scheduler_api; sai_wred_api = pold_sai_wred_api; sai_switch_api = pold_sai_switch_api; + sai_tunnel_api = pold_sai_tunnel_api; ut_helper::uninitSaiApi(); } }; @@ -1458,4 +1510,80 @@ namespace qosorch_test // Drain DSCP_TO_TC_MAP and PORT_QOS_MAP table static_cast<Orch *>(gQosOrch)->doTask(); } + + /* + * Set tunnel QoS attribute test - OA should skip settings + */ + TEST_F(QosOrchTest, QosOrchTestSetTunnelQoSAttribute) + { + // Create a new dscp to tc map + Table tcToDscpMapTable = Table(m_config_db.get(), CFG_TC_TO_DSCP_MAP_TABLE_NAME); + tcToDscpMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + gQosOrch->addExistingData(&tcToDscpMapTable); + static_cast<Orch *>(gQosOrch)->doTask(); + + std::deque<KeyOpFieldsValuesTuple> entries; + entries.push_back({"MuxTunnel0", "SET", + { + {"decap_dscp_to_tc_map", "AZURE"}, + {"decap_tc_to_pg_map", "AZURE"}, + {"dscp_mode", "pipe"}, + {"dst_ip", "10.1.0.32"}, + {"encap_tc_to_dscp_map", "AZURE"}, + {"encap_tc_to_queue_map", "AZURE"}, + {"src_ip", "10.1.0.33"}, + {"ttl_mode", "pipe"}, + {"tunnel_type", "IPINIP"} + }}); + entries.push_back({"MuxTunnel1", "SET", + { + {"decap_dscp_to_tc_map", "AZURE"}, + {"dscp_mode", "pipe"}, + {"dst_ip", "10.1.0.32"}, + {"encap_tc_to_dscp_map", "AZURE"}, + {"encap_tc_to_queue_map", "AZURE"}, + {"src_ip", "10.1.0.33"}, + {"ttl_mode", "pipe"}, + {"tunnel_type", "IPINIP"} + }}); + auto consumer = dynamic_cast<Consumer *>(tunnel_decap_orch->getExecutor(APP_TUNNEL_DECAP_TABLE_NAME)); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast<Orch *>(tunnel_decap_orch)->doTask(); + entries.clear(); + + // Set an attribute that is not supported by vendor + entries.push_back({"MuxTunnel1", "SET", + { + {"decap_tc_to_pg_map", "AZURE"} + }}); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast<Orch *>(tunnel_decap_orch)->doTask(); + entries.clear(); + + // Set attributes for the 2nd time + entries.push_back({"MuxTunnel0", "SET", + { + {"encap_ecn_mode", 
"standard"} + }}); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast<Orch *>(tunnel_decap_orch)->doTask(); + entries.clear(); + + // Set attributes for the 2nd time + entries.push_back({"MuxTunnel1", "SET", + { + {"ecn_mode", "copy_from_outer"} + }}); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast<Orch *>(tunnel_decap_orch)->doTask(); + entries.clear(); + } }