From 5a40b07197b3bd70b0d6db74e980f510390a5c2f Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Tue, 9 Jan 2024 12:41:39 +0100 Subject: [PATCH 01/21] add etcd and controllermanager services --- .../services/apiserver/apiserver_service.py | 4 +-- .../services/controllermanager/__init__.py | 0 .../controllermanager_client.py | 6 ++++ .../__init__.py | 0 ...ermanager_garbage_collection.metadata.json | 36 +++++++++++++++++++ .../controllermanager_garbage_collection.py | 25 +++++++++++++ .../controllermanager_service.py | 26 ++++++++++++++ .../kubernetes/services/etcd/__init__.py | 0 .../kubernetes/services/etcd/etcd_client.py | 4 +++ .../kubernetes/services/etcd/etcd_service.py | 24 +++++++++++++ .../etcd/etcd_tls_encryption/__init__.py | 0 .../etcd_tls_encryption.metadata.json | 36 +++++++++++++++++++ .../etcd_tls_encryption.py | 24 +++++++++++++ .../services/scheduler/scheduler_service.py | 4 +-- 14 files changed, 185 insertions(+), 4 deletions(-) create mode 100644 prowler/providers/kubernetes/services/controllermanager/__init__.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_client.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/__init__.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.metadata.json create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_service.py create mode 100644 prowler/providers/kubernetes/services/etcd/__init__.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_client.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_service.py create mode 100644 
prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/__init__.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.metadata.json create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.py diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_service.py b/prowler/providers/kubernetes/services/apiserver/apiserver_service.py index c85704157f..e46c0817e4 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_service.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_service.py @@ -9,9 +9,9 @@ def __init__(self, audit_info): super().__init__(audit_info) self.client = core_client - self.apiserver_pods = self.__get_apiserver_pod__() + self.apiserver_pods = self.__get_apiserver_pods__() - def __get_apiserver_pod__(self): + def __get_apiserver_pods__(self): try: apiserver_pods = [] for pod in self.client.pods.values(): diff --git a/prowler/providers/kubernetes/services/controllermanager/__init__.py b/prowler/providers/kubernetes/services/controllermanager/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_client.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_client.py new file mode 100644 index 0000000000..2abe3c0a06 --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_client.py @@ -0,0 +1,6 @@ +from prowler.providers.common.common import global_provider +from prowler.providers.kubernetes.services.controllermanager.controllermanager_service import ( + ControllerManager, +) + +controllermanager_client = ControllerManager(global_provider) diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/__init__.py 
b/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.metadata.json b/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.metadata.json new file mode 100644 index 0000000000..059e54d5d2 --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "controllermanager_garbage_collection", + "CheckTitle": "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate", + "CheckType": [ + "Resource Management", + "Performance Optimization" + ], + "ServiceName": "kube-controller-manager", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesControllerManager", + "Description": "Activate garbage collector on pod termination, as appropriate. Garbage collection is crucial for maintaining resource availability and performance. The default threshold for garbage collection is 12,500 terminated pods, which may be too high for some systems. Adjusting this threshold based on system resources and performance tests is recommended.", + "Risk": "A high threshold for garbage collection can lead to degraded performance and resource exhaustion. 
In extreme cases, it might cause system crashes or prolonged unavailability.", + "RelatedUrl": "https://github.com/kubernetes/kubernetes/issues/28484", + "Remediation": { + "Code": { + "CLI": "Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml and set --terminated-pod-gc-threshold to an appropriate value, e.g., --terminated-pod-gc-threshold=10", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Review and adjust the --terminated-pod-gc-threshold argument in the kube-controller-manager to ensure efficient garbage collection and optimal resource utilization.", + "Url": "https://kubernetes.io/docs/admin/kube-controller-manager/" + } + }, + "Categories": [ + "Cluster Stability", + "Operational Best Practices" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "The default value of --terminated-pod-gc-threshold is 12500. Adjust according to your specific cluster workload and performance requirements." 
from prowler.lib.check.models import Check, Check_Report_Kubernetes
from prowler.providers.kubernetes.services.controllermanager.controllermanager_client import (
    controllermanager_client,
)


class controllermanager_garbage_collection(Check):
    """Ensure kube-controller-manager is not running with the default
    --terminated-pod-gc-threshold (12500, see check metadata Notes)."""

    def execute(self) -> list:
        """Return one Check_Report_Kubernetes per controller-manager pod."""
        findings = []
        for pod in controllermanager_client.controllermanager_pods:
            report = Check_Report_Kubernetes(self.metadata())
            report.namespace = pod.namespace
            report.resource_name = pod.name
            report.resource_id = pod.uid
            report.status = "PASS"
            report.status_extended = (
                "Controller Manager has an appropriate garbage collection threshold."
            )
            for container in pod.containers.values():
                # container.command may be a list of args or a single string
                # (can't tell from here — TODO confirm against core service);
                # normalize so the substring checks work either way.
                command = (
                    " ".join(container.command)
                    if isinstance(container.command, (list, tuple))
                    else str(container.command)
                )
                # The default threshold is 12500, so both an explicit 12500 and
                # an absent flag leave the default in effect — both must FAIL.
                if (
                    "--terminated-pod-gc-threshold=12500" in command
                    or "--terminated-pod-gc-threshold" not in command
                ):
                    report.resource_id = container.name
                    report.status = "FAIL"
                    report.status_extended = "Controller Manager has the default garbage collection threshold."
            findings.append(report)
        return findings


# --- prowler/providers/kubernetes/services/controllermanager/controllermanager_service.py ---
from prowler.lib.logger import logger
from prowler.providers.kubernetes.lib.service.service import KubernetesService
from prowler.providers.kubernetes.services.core.core_client import core_client


################## ControllerManager ##################
class ControllerManager(KubernetesService):
    """Collects the kube-controller-manager pods from the kube-system namespace."""

    def __init__(self, audit_info):
        super().__init__(audit_info)
        # Reuse the shared core client, which already holds the cluster's pods.
        self.client = core_client
        self.controllermanager_pods = self.__get_controllermanager_pods__()

    def __get_controllermanager_pods__(self):
        """List kube-system pods named kube-controller-manager*.

        Always returns a list: the original returned None on exception, which
        made every check iterating controllermanager_pods raise TypeError.
        """
        controllermanager_pods = []
        try:
            for pod in self.client.pods.values():
                if pod.namespace == "kube-system" and pod.name.startswith(
                    "kube-controller-manager"
                ):
                    controllermanager_pods.append(pod)
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
        return controllermanager_pods


# --- prowler/providers/kubernetes/services/etcd/etcd_client.py ---
from prowler.providers.common.common import global_provider
from prowler.providers.kubernetes.services.etcd.etcd_service import Etcd

# Module-level singleton shared by all etcd checks.
etcd_client = Etcd(global_provider)
from prowler.lib.logger import logger
from prowler.providers.kubernetes.lib.service.service import KubernetesService
from prowler.providers.kubernetes.services.core.core_client import core_client


################## Etcd ##################
class Etcd(KubernetesService):
    """Collects the etcd pods running in the kube-system namespace."""

    def __init__(self, audit_info):
        super().__init__(audit_info)
        # Reuse the shared core client, which already holds the cluster's pods.
        self.client = core_client
        self.etcd_pods = self.__get_etcd_pods__()

    def __get_etcd_pods__(self):
        """Return kube-system pods whose name starts with "etcd".

        Always returns a list: the original returned None on exception, which
        made every check iterating etcd_pods raise TypeError.
        """
        etcd_pods = []
        try:
            for pod in self.client.pods.values():
                if pod.namespace == "kube-system" and pod.name.startswith("etcd"):
                    etcd_pods.append(pod)
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
        return etcd_pods
is configured with appropriate TLS encryption settings. etcd, being a key value store for all Kubernetes REST API objects, should have its communication encrypted to protect these sensitive objects in transit.", + "Risk": "Without proper TLS configuration, data stored in etcd can be susceptible to interception and unauthorized access, posing a significant security risk to the entire Kubernetes cluster.", + "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "Remediation": { + "Code": { + "CLI": "Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master node and set the --cert-file and --key-file arguments appropriately.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure that the etcd service is configured with TLS encryption for secure communication. The --cert-file and --key-file arguments should point to a valid TLS certificate and key.", + "Url": "https://kubernetes.io/docs/admin/etcd/" + } + }, + "Categories": [ + "Cluster Security", + "Data Protection" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "By default, etcd may not be configured with TLS encryption. It is crucial to enable TLS to protect the sensitive data handled by etcd." 
from prowler.lib.check.models import Check, Check_Report_Kubernetes
from prowler.providers.kubernetes.services.etcd.etcd_client import etcd_client


class etcd_tls_encryption(Check):
    """Verify every etcd container is started with both --cert-file and
    --key-file so client traffic is TLS-encrypted."""

    def execute(self) -> list:
        """Return one Check_Report_Kubernetes per etcd pod."""
        findings = []
        for pod in etcd_client.etcd_pods:
            report = Check_Report_Kubernetes(self.metadata())
            report.namespace = pod.namespace
            report.resource_name = pod.name
            report.resource_id = pod.uid
            report.status = "FAIL"
            report.status_extended = "Etcd does not have TLS encryption configured."
            for container in pod.containers.values():
                # These flags normally appear as --cert-file=/path, so an
                # exact-element test on a list-valued command would never
                # match; normalize to a single string first.
                command = (
                    " ".join(container.command)
                    if isinstance(container.command, (list, tuple))
                    else str(container.command)
                )
                if "--cert-file" in command and "--key-file" in command:
                    report.resource_id = container.name
                    report.status = "PASS"
                    report.status_extended = "Etcd has configured TLS encryption."
            findings.append(report)
        return findings
diff --git a/prowler/providers/kubernetes/services/rbac/rbac_client.py b/prowler/providers/kubernetes/services/rbac/rbac_client.py new file mode 100644 index 0000000000..bbbc72585b --- /dev/null +++ b/prowler/providers/kubernetes/services/rbac/rbac_client.py @@ -0,0 +1,4 @@ +from prowler.providers.common.common import global_provider +from prowler.providers.kubernetes.services.rbac.rbac_service import Rbac + +rbac_client = Rbac(global_provider) diff --git a/prowler/providers/kubernetes/services/rbac/rbac_cluster_admin_usage/__init__.py b/prowler/providers/kubernetes/services/rbac/rbac_cluster_admin_usage/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/rbac/rbac_cluster_admin_usage/rbac_cluster_admin_usage.metadata.json b/prowler/providers/kubernetes/services/rbac/rbac_cluster_admin_usage/rbac_cluster_admin_usage.metadata.json new file mode 100644 index 0000000000..d0a3093496 --- /dev/null +++ b/prowler/providers/kubernetes/services/rbac/rbac_cluster_admin_usage/rbac_cluster_admin_usage.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "rbac_cluster_admin_usage", + "CheckTitle": "Ensure that the cluster-admin role is only used where required", + "CheckType": [ + "Security", + "Compliance" + ], + "ServiceName": "RBAC", + "SubServiceName": "Authorization", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "ClusterRoleBinding", + "Description": "This check ensures that the 'cluster-admin' role, which provides wide-ranging powers, is used only where necessary. The 'cluster-admin' role grants super-user access to perform any action on any resource, including all namespaces. 
It should be applied cautiously to avoid excessive privileges.", + "Risk": "Inappropriate use of the 'cluster-admin' role can lead to excessive privileges, increasing the risk of malicious actions and potentially impacting the cluster's security posture.", + "RelatedUrl": "https://kubernetes.io/docs/admin/authorization/rbac/#user-facing-roles", + "Remediation": { + "Code": { + "CLI": "Review and, if necessary, modify the ClusterRoleBindings to limit the use of 'cluster-admin'. Use 'kubectl delete clusterrolebinding [name]' to remove unnecessary bindings.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Audit and assess the use of 'cluster-admin' role in all ClusterRoleBindings. Ensure it is assigned only to subjects that require such extensive privileges. Consider using more restrictive roles wherever possible.", + "Url": "https://kubernetes.io/docs/admin/authorization/rbac/#clusterrolebinding-example" + } + }, + "Categories": [ + "Access Control", + "Least Privilege Principle" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Modifying ClusterRoleBindings should be done with caution to avoid unintended access issues. Always ensure that critical system components have the necessary permissions to operate effectively." 
from prowler.lib.check.models import Check, Check_Report_Kubernetes
from prowler.providers.kubernetes.services.rbac.rbac_client import rbac_client


class rbac_cluster_admin_usage(Check):
    """Report every ClusterRoleBinding that grants the cluster-admin role."""

    def execute(self) -> list:
        """Return one INFO report per binding that uses cluster-admin."""
        findings = []
        for binding in rbac_client.cluster_role_bindings:
            # Only bindings that reference the built-in cluster-admin role
            # are of interest.
            if binding.roleRef.name == "cluster-admin":
                report = Check_Report_Kubernetes(self.metadata())
                # ClusterRoleBindings are cluster-scoped, so namespace is
                # normally empty.
                report.namespace = binding.metadata.namespace or ""
                report.resource_name = binding.metadata.name
                report.resource_id = binding.metadata.uid
                # INFO, not FAIL: use of cluster-admin needs human review and
                # is not a hard failure by itself.
                report.status = "INFO"
                report.status_extended = f"Cluster Role Binding {binding.metadata.name} uses cluster-admin role."
                findings.append(report)
        return findings


# --- prowler/providers/kubernetes/services/rbac/rbac_service.py ---
from typing import Any, List, Optional

from kubernetes import client
from pydantic import BaseModel

from prowler.lib.logger import logger
from prowler.providers.kubernetes.lib.service.service import KubernetesService


################## Rbac ##################
class Rbac(KubernetesService):
    """Collects ClusterRoleBindings via the RbacAuthorizationV1 API."""

    def __init__(self, audit_info):
        super().__init__(audit_info)
        self.client = client.RbacAuthorizationV1Api()
        self.cluster_role_bindings = self.__list_cluster_role_binding__()

    def __list_cluster_role_binding__(self):
        """Return the cluster's ClusterRoleBindings as pydantic models.

        Always returns a list: the original returned None on exception, which
        made every check iterating cluster_role_bindings raise TypeError.
        """
        bindings_list = []
        try:
            for binding in self.client.list_cluster_role_binding().items:
                subjects = (
                    [
                        Subject(
                            kind=subject.kind,
                            name=subject.name,
                            # Subjects of kind User/Group carry no namespace.
                            namespace=getattr(subject, "namespace", None),
                        )
                        for subject in binding.subjects
                    ]
                    if binding.subjects
                    else []
                )
                bindings_list.append(
                    ClusterRoleBinding(
                        metadata=binding.metadata,
                        subjects=subjects,
                        roleRef=RoleRef(
                            kind=binding.role_ref.kind,
                            name=binding.role_ref.name,
                            apiGroup=binding.role_ref.api_group,
                        ),
                    )
                )
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
        return bindings_list


class Subject(BaseModel):
    # An RBAC subject: user, group, or service account the role is bound to.
    kind: str
    name: str
    namespace: Optional[str] = None


class RoleRef(BaseModel):
    # Reference to the (Cluster)Role being granted.
    kind: str
    name: str
    apiGroup: str


class ClusterRoleBinding(BaseModel):
    # metadata is the raw V1ObjectMeta from the kubernetes client.
    metadata: Any
    subjects: List[Subject]
    roleRef: RoleRef
2024 17:45:51 +0100 Subject: [PATCH 03/21] add apiserver checks --- .../__init__.py | 0 ...er_deny_service_external_ips.metadata.json | 36 +++++++++++++++++++ .../apiserver_deny_service_external_ips.py | 28 +++++++++++++++ .../apiserver_kubelet_cert_auth/__init__.py | 0 .../apiserver_kubelet_cert_auth.metadata.json | 36 +++++++++++++++++++ .../apiserver_kubelet_cert_auth.py | 25 +++++++++++++ .../apiserver_kubelet_tls_auth/__init__.py | 0 .../apiserver_kubelet_tls_auth.metadata.json | 36 +++++++++++++++++++ .../apiserver_kubelet_tls_auth.py | 28 +++++++++++++++ .../apiserver_no_token_auth_file/__init__.py | 0 ...apiserver_no_token_auth_file.metadata.json | 36 +++++++++++++++++++ .../apiserver_no_token_auth_file.py | 23 ++++++++++++ .../rbac_cluster_admin_usage.py | 2 +- 13 files changed, 249 insertions(+), 1 deletion(-) create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_cert_auth/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_cert_auth/apiserver_kubelet_cert_auth.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_cert_auth/apiserver_kubelet_cert_auth.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.py create mode 100644 
prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.py diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.metadata.json new file mode 100644 index 0000000000..847d97be53 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_deny_service_external_ips", + "CheckTitle": "Ensure that the DenyServiceExternalIPs is set", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Admission Controllers", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures the DenyServiceExternalIPs admission controller is enabled, which rejects all new usage of the Service field externalIPs. 
Enabling this controller enhances security by preventing the misuse of the externalIPs field.", + "Risk": "Not setting the DenyServiceExternalIPs admission controller could allow users to create Services with external IPs, potentially exposing services to security risks.", + "RelatedUrl": "https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#denyserviceexternalips", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver manifest to include '--enable-admission-plugins=DenyServiceExternalIPs' in the API server's command arguments.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Enable the DenyServiceExternalIPs admission controller by adding it to the '--enable-admission-plugins' argument in the kube-apiserver configuration.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Network Policy", + "Best Practices" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Consider the impact on existing services before enabling this admission controller, as it can restrict the usage of external IPs in the cluster."
from prowler.lib.check.models import Check, Check_Report_Kubernetes
from prowler.providers.kubernetes.services.apiserver.apiserver_client import (
    apiserver_client,
)


class apiserver_deny_service_external_ips(Check):
    """Ensure the DenyServiceExternalIPs admission controller is enabled.

    DenyServiceExternalIPs is NOT in the API server's default admission
    plugin set, so it must appear in --enable-admission-plugins. The
    original logic PASSed unless the plugin was explicitly *disabled*,
    which made default clusters always pass despite the controller being
    off.
    """

    def execute(self) -> list:
        """Return one Check_Report_Kubernetes per API server pod."""
        findings = []
        for pod in apiserver_client.apiserver_pods:
            report = Check_Report_Kubernetes(self.metadata())
            report.namespace = pod.namespace
            report.resource_name = pod.name
            report.resource_id = pod.uid
            # Default to FAIL: absence of an explicit enable means the
            # controller is off.
            report.status = "FAIL"
            report.status_extended = "API Server does not have DenyServiceExternalIPs admission controller enabled."
            for container in pod.containers.values():
                # container.command may be a list of args or a single string;
                # normalize before scanning the flags.
                command = (
                    " ".join(container.command)
                    if isinstance(container.command, (list, tuple))
                    else str(container.command)
                )
                for arg in command.split():
                    if arg.startswith("--enable-admission-plugins="):
                        plugins = arg.split("=", 1)[1].split(",")
                        if "DenyServiceExternalIPs" in plugins:
                            report.resource_id = container.name
                            report.status = "PASS"
                            report.status_extended = "API Server has DenyServiceExternalIPs admission controller enabled."
            findings.append(report)
        return findings
Example: --kubelet-certificate-authority=/path/to/ca-file", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Enable TLS verification between the apiserver and kubelets by specifying the certificate authority in the kube-apiserver configuration.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Cluster Security", + "Communication Security" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "By default, the kube-apiserver does not verify kubelet certificates. Enabling this setting enhances the security of master-node communications." +} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_cert_auth/apiserver_kubelet_cert_auth.py b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_cert_auth/apiserver_kubelet_cert_auth.py new file mode 100644 index 0000000000..e5cbf89e01 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_cert_auth/apiserver_kubelet_cert_auth.py @@ -0,0 +1,25 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_kubelet_cert_auth(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = ( + "API Server has appropriate kubelet certificate authority configured." + ) + for container in pod.containers.values(): + if "--kubelet-certificate-authority" not in container.command: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"API Server is missing kubelet certificate authority configuration in container {container.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.metadata.json new file mode 100644 index 0000000000..78c574b8d6 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_kubelet_tls_auth", + "CheckTitle": "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Authentication", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the Kubernetes API server is not using static token-based authentication, which is less secure. Static tokens are stored in clear-text and lack features like revocation or rotation without restarting the API server.", + "Risk": "Using static token-based authentication exposes the cluster to security risks due to the static nature of the tokens, their clear-text storage, and the inability to revoke or rotate them easily.", + "RelatedUrl": "https://kubernetes.io/docs/admin/authentication/#static-token-file", + "Remediation": { + "Code": { + "CLI": "Remove the --token-auth-file parameter from the kube-apiserver configuration. 
Edit /etc/kubernetes/manifests/kube-apiserver.yaml on the master node and remove the --token-auth-file= parameter.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Replace token-based authentication with more secure mechanisms like client certificate authentication. Ensure the --token-auth-file argument is not used in the API server configuration.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Access Control", + "Best Practices" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "By default, the --token-auth-file argument is not set in the kube-apiserver. Ensure it remains unset or is removed if currently in use." +} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.py b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.py new file mode 100644 index 0000000000..b0a84cfafd --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.py @@ -0,0 +1,28 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_kubelet_tls_auth(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = ( + "API Server has appropriate kubelet TLS authentication configured." 
+ ) + for container in pod.containers.values(): + if ( + "--kubelet-client-certificate" not in container.command + or "--kubelet-client-key" not in container.command + ): + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"API Server is missing kubelet TLS authentication arguments in container {container.name}." + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.metadata.json new file mode 100644 index 0000000000..78c574b8d6 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_no_token_auth_file", + "CheckTitle": "Ensure that the --token-auth-file parameter is not set", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Authentication", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the Kubernetes API server is not using static token-based authentication, which is less secure. 
Static tokens are stored in clear-text and lack features like revocation or rotation without restarting the API server.", + "Risk": "Using static token-based authentication exposes the cluster to security risks due to the static nature of the tokens, their clear-text storage, and the inability to revoke or rotate them easily.", + "RelatedUrl": "https://kubernetes.io/docs/admin/authentication/#static-token-file", + "Remediation": { + "Code": { + "CLI": "Remove the --token-auth-file parameter from the kube-apiserver configuration. Edit /etc/kubernetes/manifests/kube-apiserver.yaml on the master node and remove the --token-auth-file= parameter.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Replace token-based authentication with more secure mechanisms like client certificate authentication. Ensure the --token-auth-file argument is not used in the API server configuration.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Access Control", + "Best Practices" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "By default, the --token-auth-file argument is not set in the kube-apiserver. Ensure it remains unset or is removed if currently in use." 
+} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.py b/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.py new file mode 100644 index 0000000000..6690dbeebd --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.py @@ -0,0 +1,23 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_no_token_auth_file(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = "API Server does not have token-auth-file enabled." + for container in pod.containers.values(): + if "--token-auth-file" in container.command: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"API Server has token-auth-file enabled in container {container.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/rbac/rbac_cluster_admin_usage/rbac_cluster_admin_usage.py b/prowler/providers/kubernetes/services/rbac/rbac_cluster_admin_usage/rbac_cluster_admin_usage.py index 9a5487bae4..3c9a754987 100644 --- a/prowler/providers/kubernetes/services/rbac/rbac_cluster_admin_usage/rbac_cluster_admin_usage.py +++ b/prowler/providers/kubernetes/services/rbac/rbac_cluster_admin_usage/rbac_cluster_admin_usage.py @@ -15,7 +15,7 @@ def execute(self) -> Check_Report_Kubernetes: ) report.resource_name = binding.metadata.name report.resource_id = binding.metadata.uid - report.status = "INFO" + report.status = "MANUAL" report.status_extended = f"Cluster Role Binding {binding.metadata.name} uses cluster-admin role." findings.append(report) return findings From 8294284eb39b014756eea2656205996660abf79f Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Wed, 10 Jan 2024 10:23:49 +0100 Subject: [PATCH 04/21] add apiserver checks part 2 --- .../__init__.py | 0 ...rver_alwayspullimages_plugin.metadata.json | 36 ++++++++++++++++ .../apiserver_alwayspullimages_plugin.py | 31 ++++++++++++++ .../__init__.py | 0 ...erver_auth_mode_include_node.metadata.json | 36 ++++++++++++++++ .../apiserver_auth_mode_include_node.py | 26 ++++++++++++ .../__init__.py | 0 ...erver_auth_mode_include_rbac.metadata.json | 36 ++++++++++++++++ .../apiserver_auth_mode_include_rbac.py | 26 ++++++++++++ .../__init__.py | 0 ...r_auth_mode_not_always_allow.metadata.json | 36 ++++++++++++++++ .../apiserver_auth_mode_not_always_allow.py | 25 +++++++++++ ...er_deny_service_external_ips.metadata.json | 2 +- .../apiserver_event_rate_limit/__init__.py | 0 .../apiserver_event_rate_limit.metadata.json | 36 ++++++++++++++++ .../apiserver_event_rate_limit.py | 32 +++++++++++++++ .../apiserver_kubelet_tls_auth.metadata.json | 2 +- .../__init__.py | 0 ...server_no_alwaysadmit_plugin.metadata.json | 36 ++++++++++++++++ 
.../apiserver_no_alwaysadmit_plugin.py | 27 ++++++++++++ ...apiserver_no_token_auth_file.metadata.json | 2 +- .../__init__.py | 0 ...r_securitycontextdeny_plugin.metadata.json | 36 ++++++++++++++++ .../apiserver_securitycontextdeny_plugin.py | 36 ++++++++++++++++ .../__init__.py | 0 ...server_serviceaccount_plugin.metadata.json | 36 ++++++++++++++++ .../apiserver_serviceaccount_plugin.py | 41 +++++++++++++++++++ .../kubernetes/services/rbac/rbac_service.py | 2 +- 28 files changed, 536 insertions(+), 4 deletions(-) create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/apiserver_auth_mode_not_always_allow.metadata.json create mode 100644 
prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/apiserver_auth_mode_not_always_allow.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/apiserver_securitycontextdeny_plugin.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/apiserver_securitycontextdeny_plugin.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/apiserver_serviceaccount_plugin.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/apiserver_serviceaccount_plugin.py diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.metadata.json new file mode 100644 index 0000000000..a4bc730671 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_alwayspullimages_plugin", + "CheckTitle": "Ensure that the admission control plugin AlwaysPullImages is set", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Admission Control", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check verifies that the AlwaysPullImages admission control plugin is enabled in the Kubernetes API server. This plugin ensures that every new pod always pulls the required images, enforcing image access control and preventing the use of possibly outdated or altered images.", + "Risk": "Without AlwaysPullImages, once an image is pulled to a node, any pod can use it without any authorization check, potentially leading to security risks.", + "RelatedUrl": "https://kubernetes.io/docs/admin/admission-controllers/#alwayspullimages", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to include AlwaysPullImages in the --enable-admission-plugins argument. 
Example: --enable-admission-plugins=...,AlwaysPullImages,...", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Configure the API server to use the AlwaysPullImages admission control plugin to ensure image security and integrity.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Image Security", + "Cluster Security" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Enabling AlwaysPullImages can increase network and registry load and decrease container startup speed. It may not be suitable for all environments." +} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.py new file mode 100644 index 0000000000..cda8f5c24b --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.py @@ -0,0 +1,31 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_alwayspullimages_plugin(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = "AlwaysPullImages admission control plugin is set." 
+ plugin_set = False + for container in pod.containers.values(): + if "--enable-admission-plugins" in container.command: + admission_plugins = container.command.split( + "--enable-admission-plugins=" + )[1].split(",") + if "AlwaysPullImages" in admission_plugins: + plugin_set = True + break + if not plugin_set: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"AlwaysPullImages admission control plugin is not set in container {container.name}." + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.metadata.json new file mode 100644 index 0000000000..1d4a7d244f --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_auth_mode_include_node", + "CheckTitle": "Ensure that the --authorization-mode argument includes Node", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Authorization", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the Kubernetes API server is configured to include 'Node' in its --authorization-mode argument. 
This mode restricts kubelets to only read objects associated with their nodes, enhancing security.", + "Risk": "If the Node authorization mode is not included, kubelets may have broader access than necessary, which can pose a security risk.", + "RelatedUrl": "https://kubernetes.io/docs/admin/authorization/node/", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to include Node in the --authorization-mode argument. Example: --authorization-mode=Node,RBAC", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Configure the API server to use Node authorization mode along with other modes like RBAC to restrict kubelet access to the necessary resources.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Access Control", + "Best Practices" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "By default, Node authorization is not enabled in Kubernetes. It is important to set this for restricting kubelet nodes appropriately." 
+} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.py b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.py new file mode 100644 index 0000000000..966cb6af7c --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.py @@ -0,0 +1,26 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_auth_mode_include_node(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = "API Server authorization mode includes Node." + for container in pod.containers.values(): + if ( + "--authorization-mode" in container.command + and "Node" not in container.command + ): + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"API Server authorization mode does not include Node in container {container.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.metadata.json new file mode 100644 index 0000000000..c1214eea25 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_auth_mode_include_rbac", + "CheckTitle": "Ensure that the --authorization-mode argument includes RBAC", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Authorization", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "KubernetesAPIServer", + "Description": "This check verifies that Role Based Access Control (RBAC) is enabled in the Kubernetes API server's authorization mode. RBAC allows for fine-grained control over cluster operations and is recommended for secure and manageable access control.", + "Risk": "If RBAC is not included in the API server's authorization mode, the cluster may not be leveraging fine-grained access controls, leading to potential security risks.", + "RelatedUrl": "https://kubernetes.io/docs/reference/access-authn-authz/rbac/", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to include RBAC in the --authorization-mode argument. 
Example: --authorization-mode=Node,RBAC", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure that the API server is configured with RBAC authorization mode for enhanced security and access control.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Access Control", + "Best Practices" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "By default, Kubernetes API server may not use RBAC authorization. It is crucial to enable this setting to ensure proper access control in the cluster." +} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.py b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.py new file mode 100644 index 0000000000..c2ab402e00 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.py @@ -0,0 +1,26 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_auth_mode_include_rbac(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = "API Server authorization mode includes RBAC." + for container in pod.containers.values(): + if ( + "--authorization-mode" in container.command + and "RBAC" not in container.command + ): + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"API Server authorization mode does not include RBAC in container {container.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/apiserver_auth_mode_not_always_allow.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/apiserver_auth_mode_not_always_allow.metadata.json new file mode 100644 index 0000000000..ce50968ffb --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/apiserver_auth_mode_not_always_allow.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_auth_mode_not_always_allow", + "CheckTitle": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Authorization", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the Kubernetes API server is not configured to always authorize all requests. The 'AlwaysAllow' mode bypasses all authorization checks, which should not be used on production clusters.", + "Risk": "If set to AlwaysAllow, the API server would authorize all requests, potentially leading to unauthorized access and security vulnerabilities.", + "RelatedUrl": "https://kubernetes.io/docs/admin/authorization/", + "Remediation": { + "Code": { + "CLI": "Set the --authorization-mode argument in the kube-apiserver configuration to a secure mode such as RBAC. 
Example: --authorization-mode=RBAC", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure the API server is using a secure authorization mode, such as RBAC, and not set to AlwaysAllow.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Access Control", + "Cluster Security" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "By default, AlwaysAllow is not enabled in kube-apiserver. It's crucial to maintain this setting for the security of the cluster." +} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/apiserver_auth_mode_not_always_allow.py b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/apiserver_auth_mode_not_always_allow.py new file mode 100644 index 0000000000..3a934b63c8 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/apiserver_auth_mode_not_always_allow.py @@ -0,0 +1,25 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_auth_mode_not_always_allow(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = ( + "API Server authorization mode is not set to AlwaysAllow." + ) + for container in pod.containers.values(): + if "--authorization-mode=AlwaysAllow" in container.command: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"API Server authorization mode is set to AlwaysAllow in container {container.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.metadata.json index 847d97be53..15a986c567 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.metadata.json @@ -28,7 +28,7 @@ }, "Categories": [ "Network Policy", - "Best Practices" + "Security Best Practices" ], "DependsOn": [], "RelatedTo": [], diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.metadata.json new file mode 100644 index 0000000000..ee62ce0de1 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_event_rate_limit", + "CheckTitle": "Ensure that the admission control plugin EventRateLimit is set", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Admission Control", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check verifies if the Kubernetes API server is configured with the EventRateLimit admission control plugin. 
This plugin limits the rate of events accepted by the API Server, preventing potential DoS attacks by misbehaving workloads.", + "Risk": "Without EventRateLimit, the API server could be overwhelmed by a high number of events, leading to DoS and performance issues.", + "RelatedUrl": "https://kubernetes.io/docs/admin/admission-controllers/#eventratelimit", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to include EventRateLimit in the --enable-admission-plugins argument and specify a configuration file. Example: --enable-admission-plugins=...,EventRateLimit,... --admission-control-config-file=/path/to/configuration/file", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Configure EventRateLimit as an admission control plugin for the API server to manage the rate of incoming events effectively.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Resource Management", + "Best Practices" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Tuning EventRateLimit requires careful consideration of the specific requirements of your environment." 
+} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.py b/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.py new file mode 100644 index 0000000000..09a8c512cd --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.py @@ -0,0 +1,32 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_event_rate_limit(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = "EventRateLimit admission control plugin is set." + plugin_set = False + for container in pod.containers.values(): + if "--enable-admission-plugins" in container.command: + admission_plugins = container.command.split( + "--enable-admission-plugins=" + )[1].split(",") + if "EventRateLimit" in admission_plugins: + plugin_set = True + break + if not plugin_set: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"EventRateLimit admission control plugin is not set in container {container.name}." 
+ + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.metadata.json index 78c574b8d6..aef64b3bc7 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.metadata.json @@ -28,7 +28,7 @@ }, "Categories": [ "Access Control", - "Best Practices" + "Security Best Practices" ], "DependsOn": [], "RelatedTo": [], diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.metadata.json new file mode 100644 index 0000000000..ee62ce0de1 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_no_alwaysadmit_plugin", + "CheckTitle": "Ensure that the admission control plugin AlwaysAdmit is not set", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Admission Control", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check verifies if the Kubernetes API server is configured with the EventRateLimit admission control plugin.
This plugin limits the rate of events accepted by the API Server, preventing potential DoS attacks by misbehaving workloads.", + "Risk": "Without EventRateLimit, the API server could be overwhelmed by a high number of events, leading to DoS and performance issues.", + "RelatedUrl": "https://kubernetes.io/docs/admin/admission-controllers/#eventratelimit", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to include EventRateLimit in the --enable-admission-plugins argument and specify a configuration file. Example: --enable-admission-plugins=...,EventRateLimit,... --admission-control-config-file=/path/to/configuration/file", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Configure EventRateLimit as an admission control plugin for the API server to manage the rate of incoming events effectively.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Resource Management", + "Best Practices" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Tuning EventRateLimit requires careful consideration of the specific requirements of your environment." 
+} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.py new file mode 100644 index 0000000000..6cdbd443ff --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.py @@ -0,0 +1,27 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_no_alwaysadmit_plugin(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = "AlwaysAdmit admission control plugin is not set." + for container in pod.containers.values(): + if "--enable-admission-plugins" in container.command: + admission_plugins = container.command.split( + "--enable-admission-plugins=" + )[1].split(",") + if "AlwaysAdmit" in admission_plugins: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"AlwaysAdmit admission control plugin is set in container {container.name}."
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.metadata.json index 78c574b8d6..aef64b3bc7 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.metadata.json @@ -28,7 +28,7 @@ }, "Categories": [ "Access Control", - "Best Practices" + "Security Best Practices" ], "DependsOn": [], "RelatedTo": [], diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/apiserver_securitycontextdeny_plugin.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/apiserver_securitycontextdeny_plugin.metadata.json new file mode 100644 index 0000000000..b4b262d76c --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/apiserver_securitycontextdeny_plugin.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_securitycontextdeny_plugin", + "CheckTitle": "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Admission Control", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check verifies that the SecurityContextDeny admission 
control plugin is enabled in the Kubernetes API server if PodSecurityPolicy is not used. The SecurityContextDeny plugin denies pods that make use of certain SecurityContext fields which could allow privilege escalation.", + "Risk": "Without SecurityContextDeny, pods may be able to escalate privileges if PodSecurityPolicy is not used, potentially leading to security vulnerabilities.", + "RelatedUrl": "https://kubernetes.io/docs/admin/admission-controllers/#securitycontextdeny", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to include SecurityContextDeny in the --enable-admission-plugins argument, unless PodSecurityPolicy is already in use. Example: --enable-admission-plugins=...,SecurityContextDeny,...", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Use SecurityContextDeny as an admission control plugin in the API server to enhance security, especially in the absence of PodSecurityPolicy.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Pod Security", + "Best Practices" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "SecurityContextDeny is recommended in environments where PodSecurityPolicy is not implemented to prevent potential privilege escalations." 
+} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/apiserver_securitycontextdeny_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/apiserver_securitycontextdeny_plugin.py new file mode 100644 index 0000000000..895806ada1 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/apiserver_securitycontextdeny_plugin.py @@ -0,0 +1,36 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_securitycontextdeny_plugin(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = "SecurityContextDeny admission control plugin is set or PodSecurityPolicy is in use." + security_context_deny_set = False + pod_security_policy_set = False + for container in pod.containers.values(): + if "--enable-admission-plugins" in container.command: + admission_plugins = container.command.split( + "--enable-admission-plugins=" + )[1].split(",") + security_context_deny_set = ( + "SecurityContextDeny" in admission_plugins + ) + pod_security_policy_set = "PodSecurityPolicy" in admission_plugins + + if security_context_deny_set or pod_security_policy_set: + report.status = "PASS" + else: + report.status = "FAIL" + report.status_extended = f"Neither SecurityContextDeny nor PodSecurityPolicy admission control plugins are set in container {container.name}."
+ + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/apiserver_serviceaccount_plugin.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/apiserver_serviceaccount_plugin.metadata.json new file mode 100644 index 0000000000..da5c19e6d8 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/apiserver_serviceaccount_plugin.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_serviceaccount_plugin", + "CheckTitle": "Ensure that the admission control plugin ServiceAccount is set", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Admission Control", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check verifies that the ServiceAccount admission control plugin is enabled in the Kubernetes API server. This plugin automates the creation and assignment of service accounts to pods, enhancing security by managing service account tokens.", + "Risk": "If the ServiceAccount admission plugin is disabled, pods might be assigned the default service account without proper token management, leading to potential security risks.", + "RelatedUrl": "https://kubernetes.io/docs/admin/admission-controllers/#serviceaccount", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to ensure that ServiceAccount is included in the --enable-admission-plugins argument. 
Remove the plugin from --disable-admission-plugins if present.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Enable the ServiceAccount admission control plugin in the API server to manage service accounts and tokens securely.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Access Control", + "Service Accounts" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "ServiceAccount plugin is usually enabled by default, ensuring automated management of service accounts and their associated tokens." +} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/apiserver_serviceaccount_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/apiserver_serviceaccount_plugin.py new file mode 100644 index 0000000000..127f475a77 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/apiserver_serviceaccount_plugin.py @@ -0,0 +1,41 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_serviceaccount_plugin(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = "ServiceAccount admission control plugin is set."
+ + service_account_plugin_set = False + for container in pod.containers.values(): + # Check if "--enable-admission-plugins" includes "ServiceAccount" + # and "--disable-admission-plugins" does not include "ServiceAccount" + if "--enable-admission-plugins" in container.command: + admission_plugins = container.command.split( + "--enable-admission-plugins=" + )[1].split(",") + if "ServiceAccount" in admission_plugins: + service_account_plugin_set = True + if "--disable-admission-plugins" in container.command: + disabled_plugins = container.command.split( + "--disable-admission-plugins=" + )[1].split(",") + if "ServiceAccount" in disabled_plugins: + service_account_plugin_set = False + + if not service_account_plugin_set: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"ServiceAccount admission control plugin is not set in container {container.name}." + + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/rbac/rbac_service.py b/prowler/providers/kubernetes/services/rbac/rbac_service.py index acb6fe4057..5b00ed9875 100644 --- a/prowler/providers/kubernetes/services/rbac/rbac_service.py +++ b/prowler/providers/kubernetes/services/rbac/rbac_service.py @@ -28,7 +28,7 @@ def __list_cluster_role_binding__(self): { "kind": subject.kind, "name": subject.name, - "namespace": getattr(subject, "namespace", None), + "namespace": getattr(subject, "namespace", ""), } for subject in binding.subjects ], From 86e12bf796cb6649d24712010deb00a89f3c2d84 Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Wed, 10 Jan 2024 12:59:28 +0100 Subject: [PATCH 05/21] add apiserver checks part 3 --- .../apiserver_alwayspullimages_plugin.py | 2 +- .../__init__.py | 0 ...iserver_audit_log_maxage_set.metadata.json | 36 ++++++++++++++++ .../apiserver_audit_log_maxage_set.py | 37 ++++++++++++++++ .../__init__.py | 0 ...rver_audit_log_maxbackup_set.metadata.json | 36 ++++++++++++++++
.../apiserver_audit_log_maxbackup_set.py | 38 ++++++++++++++++ .../__init__.py | 0 ...server_audit_log_maxsize_set.metadata.json | 36 ++++++++++++++++ .../apiserver_audit_log_maxsize_set.py | 37 ++++++++++++++++ .../apiserver_audit_log_path_set/__init__.py | 0 ...apiserver_audit_log_path_set.metadata.json | 36 ++++++++++++++++ .../apiserver_audit_log_path_set.py | 33 ++++++++++++++ .../apiserver_client_ca_file_set/__init__.py | 0 ...apiserver_client_ca_file_set.metadata.json | 36 ++++++++++++++++ .../apiserver_client_ca_file_set.py | 34 +++++++++++++++ .../apiserver_disable_profiling/__init__.py | 0 .../apiserver_disable_profiling.metadata.json | 36 ++++++++++++++++ .../apiserver_disable_profiling.py | 31 +++++++++++++ .../__init__.py | 0 ...cryption_provider_config_set.metadata.json | 36 ++++++++++++++++ ...piserver_encryption_provider_config_set.py | 33 ++++++++++++++ .../apiserver_etcd_cafile_set/__init__.py | 0 .../apiserver_etcd_cafile_set.metadata.json | 36 ++++++++++++++++ .../apiserver_etcd_cafile_set.py | 34 +++++++++++++++ .../apiserver_etcd_tls_config/__init__.py | 0 .../apiserver_etcd_tls_config.metadata.json | 36 ++++++++++++++++ .../apiserver_etcd_tls_config.py | 35 +++++++++++++++ .../__init__.py | 0 ...er_namespacelifecycle_plugin.metadata.json | 36 ++++++++++++++++ .../apiserver_namespacelifecycle_plugin.py | 43 +++++++++++++++++++ .../apiserver_no_alwaysadmit_plugin.py | 2 +- .../__init__.py | 0 ...erver_noderestriction_plugin.metadata.json | 36 ++++++++++++++++ .../apiserver_noderestriction_plugin.py | 35 +++++++++++++++ .../apiserver_request_timeout_set/__init__.py | 0 ...piserver_request_timeout_set.metadata.json | 36 ++++++++++++++++ .../apiserver_request_timeout_set.py | 36 ++++++++++++++++ .../__init__.py | 0 ...ecurity_context_deny_plugin.metadata.json} | 2 +- ...apiserver_security_context_deny_plugin.py} | 2 +- .../__init__.py | 0 ...service_account_key_file_set.metadata.json | 36 ++++++++++++++++ 
.../apiserver_service_account_key_file_set.py | 35 +++++++++++++++ .../__init__.py | 0 ..._service_account_lookup_true.metadata.json | 36 ++++++++++++++++ .../apiserver_service_account_lookup_true.py | 33 ++++++++++++++ .../__init__.py | 0 ...rver_service_account_plugin.metadata.json} | 2 +- .../apiserver_service_account_plugin.py} | 2 +- .../apiserver_strong_ciphers_only/__init__.py | 0 ...piserver_strong_ciphers_only.metadata.json | 36 ++++++++++++++++ .../apiserver_strong_ciphers_only.py | 39 +++++++++++++++++ .../apiserver_tls_config/__init__.py | 0 .../apiserver_tls_config.metadata.json | 36 ++++++++++++++++ .../apiserver_tls_config.py | 35 +++++++++++++++ 56 files changed, 1150 insertions(+), 6 deletions(-) rename prowler/providers/kubernetes/services/apiserver/{apiserver_securitycontextdeny_plugin => apiserver_audit_log_maxage_set}/__init__.py (100%) create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/apiserver_audit_log_maxage_set.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/apiserver_audit_log_maxage_set.py rename prowler/providers/kubernetes/services/apiserver/{apiserver_serviceaccount_plugin => apiserver_audit_log_maxbackup_set}/__init__.py (100%) create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/apiserver_audit_log_maxbackup_set.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/apiserver_audit_log_maxbackup_set.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/apiserver_audit_log_maxsize_set.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/apiserver_audit_log_maxsize_set.py create mode 100644 
prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/apiserver_audit_log_path_set.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/apiserver_audit_log_path_set.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/apiserver_client_ca_file_set.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/apiserver_client_ca_file_set.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/apiserver_disable_profiling.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/apiserver_disable_profiling.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/apiserver_encryption_provider_config_set.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/apiserver_encryption_provider_config_set.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/apiserver_etcd_cafile_set.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/apiserver_etcd_cafile_set.py create mode 100644 
prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/apiserver_etcd_tls_config.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/apiserver_etcd_tls_config.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_request_timeout_set/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_request_timeout_set/apiserver_request_timeout_set.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_request_timeout_set/apiserver_request_timeout_set.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_security_context_deny_plugin/__init__.py rename prowler/providers/kubernetes/services/apiserver/{apiserver_securitycontextdeny_plugin/apiserver_securitycontextdeny_plugin.metadata.json => apiserver_security_context_deny_plugin/apiserver_security_context_deny_plugin.metadata.json} (97%) rename 
prowler/providers/kubernetes/services/apiserver/{apiserver_securitycontextdeny_plugin/apiserver_securitycontextdeny_plugin.py => apiserver_security_context_deny_plugin/apiserver_security_context_deny_plugin.py} (96%) create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_service_account_lookup_true/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_service_account_lookup_true/apiserver_service_account_lookup_true.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_service_account_lookup_true/apiserver_service_account_lookup_true.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_service_account_plugin/__init__.py rename prowler/providers/kubernetes/services/apiserver/{apiserver_serviceaccount_plugin/apiserver_serviceaccount_plugin.metadata.json => apiserver_service_account_plugin/apiserver_service_account_plugin.metadata.json} (97%) rename prowler/providers/kubernetes/services/apiserver/{apiserver_serviceaccount_plugin/apiserver_serviceaccount_plugin.py => apiserver_service_account_plugin/apiserver_service_account_plugin.py} (97%) create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_strong_ciphers_only/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_strong_ciphers_only/apiserver_strong_ciphers_only.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_strong_ciphers_only/apiserver_strong_ciphers_only.py create mode 100644 
prowler/providers/kubernetes/services/apiserver/apiserver_tls_config/__init__.py create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_tls_config/apiserver_tls_config.metadata.json create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_tls_config/apiserver_tls_config.py diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.py index cda8f5c24b..00f1895a88 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.py @@ -4,7 +4,7 @@ ) -class apiserver_event_rate_limit(Check): +class apiserver_alwayspullimages_plugin(Check): def execute(self) -> Check_Report_Kubernetes: findings = [] for pod in apiserver_client.apiserver_pods: diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/__init__.py similarity index 100% rename from prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/__init__.py rename to prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/__init__.py diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/apiserver_audit_log_maxage_set.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/apiserver_audit_log_maxage_set.metadata.json new file mode 100644 index 0000000000..5d091079f5 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/apiserver_audit_log_maxage_set.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": 
"kubernetes", + "CheckID": "apiserver_audit_log_maxage_set", + "CheckTitle": "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Auditing", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the Kubernetes API server is configured with an appropriate audit log retention period. Setting --audit-log-maxage to 30 or as per business requirements helps in maintaining logs for sufficient time to investigate past events.", + "Risk": "Without an adequate log retention period, there may be insufficient audit history to investigate and analyze past events or security incidents.", + "RelatedUrl": "https://kubernetes.io/docs/concepts/cluster-administration/audit/", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to set the --audit-log-maxage argument to 30 or an appropriate number of days. Example: --audit-log-maxage=30", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Configure the API server audit log retention period to retain logs for at least 30 days or as per your organization's requirements.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Logging", + "Compliance" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Ensure the audit log retention period is set appropriately to balance between storage constraints and the need for historical data." 
+} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/apiserver_audit_log_maxage_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/apiserver_audit_log_maxage_set.py new file mode 100644 index 0000000000..b3bde54377 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/apiserver_audit_log_maxage_set.py @@ -0,0 +1,37 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_audit_log_maxage_set(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = ( + "Audit log max age is set appropriately in the API server." + ) + + audit_log_maxage_set = False + for container in pod.containers.values(): + # Check if "--audit-log-maxage" is set to 30 or as appropriate + if "--audit-log-maxage" in container.command: + maxage_value = int( + container.command.split("--audit-log-maxage=")[1].split(" ")[0] + ) + if maxage_value >= 30: + audit_log_maxage_set = True + break + + if not audit_log_maxage_set: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"Audit log max age is not set to 30 or as appropriate in container {container.name}."
+ + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/__init__.py similarity index 100% rename from prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/__init__.py rename to prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/__init__.py diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/apiserver_audit_log_maxbackup_set.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/apiserver_audit_log_maxbackup_set.metadata.json new file mode 100644 index 0000000000..588762e702 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/apiserver_audit_log_maxbackup_set.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_audit_log_maxbackup_set", + "CheckTitle": "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Auditing", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the Kubernetes API server is configured with an appropriate number of audit log backups. 
Setting --audit-log-maxbackup to 10 or as per business requirements helps maintain a sufficient log backup for investigations or analysis.", + "Risk": "Without an adequate number of audit log backups, there may be insufficient log history to investigate past events or security incidents.", + "RelatedUrl": "https://kubernetes.io/docs/concepts/cluster-administration/audit/", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to set the --audit-log-maxbackup argument to 10 or an appropriate number. Example: --audit-log-maxbackup=10", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Configure the API server audit log backup retention to 10 or as per your organization's requirements.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Logging", + "Compliance" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Ensure the audit log backup retention period is set appropriately to balance between storage constraints and the need for historical data." 
+} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/apiserver_audit_log_maxbackup_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/apiserver_audit_log_maxbackup_set.py new file mode 100644 index 0000000000..61d764cd9a --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/apiserver_audit_log_maxbackup_set.py @@ -0,0 +1,38 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_audit_log_maxbackup_set(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = ( + "Audit log max backup is set appropriately in the API server." + ) + audit_log_maxbackup_set = False + for container in pod.containers.values(): + # Check if "--audit-log-maxbackup" is set to 10 or as appropriate + if "--audit-log-maxbackup" in container.command: + maxbackup_value = int( + container.command.split("--audit-log-maxbackup=")[1].split(" ")[ + 0 + ] + ) + if maxbackup_value >= 10: + audit_log_maxbackup_set = True + break + + if not audit_log_maxbackup_set: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"Audit log max backup is not set to 10 or as appropriate in container {container.name}." 
+ + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/apiserver_audit_log_maxsize_set.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/apiserver_audit_log_maxsize_set.metadata.json new file mode 100644 index 0000000000..8e0792a529 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/apiserver_audit_log_maxsize_set.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_audit_log_maxsize_set", + "CheckTitle": "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Auditing", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the Kubernetes API server is configured with an appropriate audit log file size limit. Setting --audit-log-maxsize to 100 MB or as per business requirements helps manage the size of log files and prevents them from growing excessively large.", + "Risk": "Without an appropriate audit log file size limit, log files can grow excessively large, potentially leading to storage issues and difficulty in log analysis.", + "RelatedUrl": "https://kubernetes.io/docs/concepts/cluster-administration/audit/", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to set the --audit-log-maxsize argument to 100 MB or an appropriate size. 
Example: --audit-log-maxsize=100", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Configure the API server audit log file size limit to 100 MB or as per your organization's requirements.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Logging", + "Compliance" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Adjust the audit log file size limit based on your organization's storage capabilities and logging requirements." +} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/apiserver_audit_log_maxsize_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/apiserver_audit_log_maxsize_set.py new file mode 100644 index 0000000000..618c63b55a --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/apiserver_audit_log_maxsize_set.py @@ -0,0 +1,37 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_audit_log_maxsize_set(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = ( + "Audit log max size is set appropriately in the API server." 
+ ) + + audit_log_maxsize_set = False + for container in pod.containers.values(): + # Check if "--audit-log-maxsize" is set to 100 MB or as appropriate + if "--audit-log-maxsize" in container.command: + maxsize_value = int( + container.command.split("--audit-log-maxsize=")[1].split(" ")[0] + ) + if maxsize_value >= 100: + audit_log_maxsize_set = True + break + + if not audit_log_maxsize_set: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"Audit log max size is not set to 100 MB or as appropriate in container {container.name}." + + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/apiserver_audit_log_path_set.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/apiserver_audit_log_path_set.metadata.json new file mode 100644 index 0000000000..f48962d8fe --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/apiserver_audit_log_path_set.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_audit_log_path_set", + "CheckTitle": "Ensure that the --audit-log-path argument is set", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Auditing", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "KubernetesAPIServer", + "Description": "This check verifies that the Kubernetes API server is configured with an audit log path. 
Enabling audit logs helps in maintaining a chronological record of all activities and operations which can be critical for security analysis and troubleshooting.", + "Risk": "Without audit logs, it becomes difficult to track changes and activities within the cluster, potentially obscuring the detection of malicious activities or operational issues.", + "RelatedUrl": "https://kubernetes.io/docs/concepts/cluster-administration/audit/", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to set the --audit-log-path argument to a valid log path. Example: --audit-log-path=/var/log/apiserver/audit.log", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Enable audit logging in the API server by specifying a valid path for --audit-log-path to ensure comprehensive activity logging within the cluster.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Logging", + "Compliance" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Audit logs are not enabled by default in Kubernetes. Configuring them is essential for security monitoring and forensic analysis." 
+} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/apiserver_audit_log_path_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/apiserver_audit_log_path_set.py new file mode 100644 index 0000000000..3f0de4acd1 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/apiserver_audit_log_path_set.py @@ -0,0 +1,33 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_audit_log_path_set(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = "Audit log path is set in the API server." + + audit_log_path_set = False + for container in pod.containers.values(): + # Check if "--audit-log-path" is set + if "--audit-log-path" in container.command: + audit_log_path_set = True + break + + if not audit_log_path_set: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = ( + f"Audit log path is not set in container {container.name}."
+ ) + + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/apiserver_client_ca_file_set.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/apiserver_client_ca_file_set.metadata.json new file mode 100644 index 0000000000..4c295f7178 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/apiserver_client_ca_file_set.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_client_ca_file_set", + "CheckTitle": "Ensure that the --client-ca-file argument is set as appropriate", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "TLS Authentication", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the Kubernetes API server is configured with the --client-ca-file argument, specifying the CA file for client authentication. This setting enables the API server to authenticate clients using certificates signed by the CA and is crucial for secure communication.", + "Risk": "If the client CA file is not set, the API server may not properly authenticate clients, potentially leading to unauthorized access.", + "RelatedUrl": "http://rootsquash.com/2016/05/10/securing-the-kubernetes-api/", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to include the --client-ca-file parameter with the appropriate CA file. 
Example: --client-ca-file=", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure the API server is configured with a client CA file for secure client authentication.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Access Control", + "TLS Configuration" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "The client CA file is a critical component of TLS authentication and should be properly managed and securely stored." +} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/apiserver_client_ca_file_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/apiserver_client_ca_file_set.py new file mode 100644 index 0000000000..7cf0340ccd --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/apiserver_client_ca_file_set.py @@ -0,0 +1,34 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_client_ca_file_set(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = ( + "Client CA file is set appropriately in the API server." + ) + client_ca_file_set = False + for container in pod.containers.values(): + # Check if "--client-ca-file" is set + if "--client-ca-file" in container.command: + client_ca_file_set = True + break + + if not client_ca_file_set: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = ( + f"Client CA file is not set in container {container.name}."
+ ) + + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/apiserver_disable_profiling.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/apiserver_disable_profiling.metadata.json new file mode 100644 index 0000000000..4f8ca4fcb8 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/apiserver_disable_profiling.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_disable_profiling", + "CheckTitle": "Ensure that the --profiling argument is set to false", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Performance", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that profiling is disabled in the Kubernetes API server. Profiling generates extensive data about the system's performance and operations, which, if not needed, should be disabled to reduce the attack surface.", + "Risk": "Enabled profiling can potentially expose detailed system and program data, which might be exploited for malicious purposes.", + "RelatedUrl": "https://github.com/kubernetes/community/blob/master/contributors/devel/profiling.md", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to set the --profiling argument to false. 
Example: --profiling=false", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Disable profiling in the API server unless it is necessary for troubleshooting performance bottlenecks.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Security Best Practices", + "Configuration Management" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Profiling is enabled by default in Kubernetes. Disabling it when not needed helps in securing the cluster." +} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/apiserver_disable_profiling.py b/prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/apiserver_disable_profiling.py new file mode 100644 index 0000000000..ba5f77e8ef --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/apiserver_disable_profiling.py @@ -0,0 +1,31 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_disable_profiling(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = "Profiling is disabled in the API server." + profiling_enabled = False + for container in pod.containers.values(): + # Check if "--profiling" is set to false + if "--profiling=false" not in container.command: + profiling_enabled = True + break + if profiling_enabled: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = ( + f"Profiling is enabled in container {container.name}." 
+ ) + + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/apiserver_encryption_provider_config_set.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/apiserver_encryption_provider_config_set.metadata.json new file mode 100644 index 0000000000..ee2c0d7703 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/apiserver_encryption_provider_config_set.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_encryption_provider_config_set", + "CheckTitle": "Ensure that the --encryption-provider-config argument is set as appropriate", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Data Encryption", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the Kubernetes API server is configured with the --encryption-provider-config argument to encrypt sensitive data at rest in the etcd key-value store. Encrypting data at rest prevents potential unauthorized disclosures and ensures that the sensitive data is secure.", + "Risk": "Without proper configuration of the encryption provider, sensitive data stored in etcd might not be encrypted, posing a risk of data breaches.", + "RelatedUrl": "https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to include the --encryption-provider-config parameter with the path to the EncryptionConfig file. 
Example: --encryption-provider-config=/path/to/EncryptionConfig/File", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Configure and enable encryption for data at rest in etcd using a suitable EncryptionConfig file.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Data Security", + "Configuration Optimization" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Ensure that the EncryptionConfig file is correctly configured and securely stored." +} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/apiserver_encryption_provider_config_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/apiserver_encryption_provider_config_set.py new file mode 100644 index 0000000000..f19788f993 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/apiserver_encryption_provider_config_set.py @@ -0,0 +1,33 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_encryption_provider_config_set(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = ( + "Encryption provider config is set appropriately in the API server." 
+ ) + + encryption_provider_config_set = False + for container in pod.containers.values(): + # Check if "--encryption-provider-config" is set + if "--encryption-provider-config" in container.command: + encryption_provider_config_set = True + break + + if not encryption_provider_config_set: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"Encryption provider config is not set in container {container.name}." + + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/apiserver_etcd_cafile_set.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/apiserver_etcd_cafile_set.metadata.json new file mode 100644 index 0000000000..c993710d9b --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/apiserver_etcd_cafile_set.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_etcd_cafile_set", + "CheckTitle": "Ensure that the --etcd-cafile argument is set as appropriate", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "etcd Connection", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the Kubernetes API server is configured with the --etcd-cafile argument, specifying the Certificate Authority file for etcd client connections. 
This setting is important for secure communication with etcd and ensures that the API server connects to etcd with an SSL Certificate Authority file.", + "Risk": "Without proper TLS configuration, communication between the API server and etcd can be unencrypted, leading to potential security vulnerabilities.", + "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to include the --etcd-cafile parameter with the appropriate CA file. Example: --etcd-cafile=", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure etcd connections from the API server are secured using the appropriate CA file.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Data Security", + "TLS Configuration" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "It is crucial to manage and rotate the CA file securely as part of your cluster's security practices." 
+} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/apiserver_etcd_cafile_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/apiserver_etcd_cafile_set.py new file mode 100644 index 0000000000..a1099c30af --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/apiserver_etcd_cafile_set.py @@ -0,0 +1,34 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_etcd_cafile_set(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = ( + "etcd CA file is set appropriately in the API server." + ) + etcd_cafile_set = False + for container in pod.containers.values(): + # Check if "--etcd-cafile" is set + if "--etcd-cafile" in container.command: + etcd_cafile_set = True + break + + if not etcd_cafile_set: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = ( + f"etcd CA file is not set in container {container.name}." 
+ ) + + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/apiserver_etcd_tls_config.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/apiserver_etcd_tls_config.metadata.json new file mode 100644 index 0000000000..75ea272630 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/apiserver_etcd_tls_config.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_etcd_tls_config", + "CheckTitle": "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "etcd Connection", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the Kubernetes API server is configured with TLS encryption for etcd client connections, using --etcd-certfile and --etcd-keyfile arguments. Setting up TLS for etcd is crucial for securing the sensitive data stored in etcd as it's the primary datastore for Kubernetes.", + "Risk": "Without TLS encryption, data stored in etcd is susceptible to eavesdropping and man-in-the-middle attacks, potentially leading to data breaches.", + "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to include TLS parameters for etcd connection. 
Example: --etcd-certfile= --etcd-keyfile=", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Enable TLS encryption for etcd client connections to secure sensitive data.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Data Security", + "Configuration Optimization" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "TLS encryption for etcd is not enabled by default and should be explicitly configured." +} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/apiserver_etcd_tls_config.py b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/apiserver_etcd_tls_config.py new file mode 100644 index 0000000000..44a33e90fd --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/apiserver_etcd_tls_config.py @@ -0,0 +1,35 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_etcd_tls_config(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = ( + "TLS configuration for etcd is set appropriately in the API server." + ) + etcd_tls_config_set = False + for container in pod.containers.values(): + # Check if "--etcd-certfile" and "--etcd-keyfile" are set + if ( + "--etcd-certfile" in container.command + and "--etcd-keyfile" in container.command + ): + etcd_tls_config_set = True + break + + if not etcd_tls_config_set: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"TLS configuration for etcd is not set in container {container.name}." 
+ + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.metadata.json new file mode 100644 index 0000000000..e0ba41c8cd --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_namespacelifecycle_plugin", + "CheckTitle": "Ensure that the admission control plugin NamespaceLifecycle is set", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Admission Control", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check verifies that the NamespaceLifecycle admission control plugin is enabled in the Kubernetes API server. This plugin prevents the creation of objects in non-existent or terminating namespaces, enforcing the integrity of the namespace lifecycle and availability of new objects.", + "Risk": "Without NamespaceLifecycle, objects may be created in namespaces that are being terminated, potentially leading to inconsistencies and resource conflicts.", + "RelatedUrl": "https://kubernetes.io/docs/admin/admission-controllers/#namespacelifecycle", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to ensure that NamespaceLifecycle is included in the --enable-admission-plugins argument. 
Remove the plugin from --disable-admission-plugins if present.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Enable the NamespaceLifecycle admission control plugin in the API server to enforce proper namespace management.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Namespace Management", + "Cluster Security" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "NamespaceLifecycle plugin is usually enabled by default, ensuring proper management of namespace creation and termination." +} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.py new file mode 100644 index 0000000000..1a7b5ccda6 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.py @@ -0,0 +1,43 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) + + +class apiserver_namespacelifecycle_plugin(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in apiserver_client.apiserver_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = ( + "NamespaceLifecycle admission control plugin is set." 
+ ) + + namespace_lifecycle_plugin_set = False + for container in pod.containers.values(): + # Check if "--enable-admission-plugins" includes "NamespaceLifecycle" + # and "--disable-admission-plugins" does not include "NamespaceLifecycle" + if "--enable-admission-plugins" in container.command: + admission_plugins = container.command.split( + "--enable-admission-plugins=" + )[1].split(",") + if "NamespaceLifecycle" in admission_plugins: + namespace_lifecycle_plugin_set = True + if "--disable-admission-plugins" in container.command: + disabled_plugins = container.command.split( + "--disable-admission-plugins=" + )[1].split(",") + if "NamespaceLifecycle" in disabled_plugins: + namespace_lifecycle_plugin_set = False + + if not namespace_lifecycle_plugin_set: + report.resource_id = container.name + report.status = "FAIL" + report.status_extended = f"NamespaceLifecycle admission control plugin is not set in container {container.name}." + + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.py index 6cdbd443ff..d897b9838e 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.py @@ -4,7 +4,7 @@ ) -class apiserver_event_rate_limit(Check): +class apiserver_no_alwaysadmit_plugin(Check): def execute(self) -> Check_Report_Kubernetes: findings = [] for pod in apiserver_client.apiserver_pods: diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.metadata.json new file mode 100644 index 0000000000..b694fd25ed --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_noderestriction_plugin", + "CheckTitle": "Ensure that the admission control plugin NodeRestriction is set", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Admission Control", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the NodeRestriction admission control plugin is enabled in the Kubernetes API server. NodeRestriction limits the Node and Pod objects that a kubelet can modify, enhancing security by ensuring kubelets are restricted to manage their own node and pods.", + "Risk": "Without NodeRestriction, kubelets may have broader access to Node and Pod objects, potentially leading to unauthorized modifications and security risks.", + "RelatedUrl": "https://kubernetes.io/docs/admin/admission-controllers/#noderestriction", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to include NodeRestriction in the --enable-admission-plugins argument. 
class apiserver_noderestriction_plugin(Check):
    """Ensure the NodeRestriction admission control plugin is enabled.

    For every API-server pod, PASS when some container lists
    NodeRestriction in --enable-admission-plugins; FAIL otherwise.

    Returns:
        list[Check_Report_Kubernetes]: one report per API-server pod.
    """

    def execute(self) -> Check_Report_Kubernetes:
        findings = []
        for pod in apiserver_client.apiserver_pods:
            report = Check_Report_Kubernetes(self.metadata())
            report.namespace = pod.namespace
            report.resource_name = pod.name
            report.resource_id = pod.uid
            # Optimistic default; overwritten below if the plugin is missing.
            report.status = "PASS"
            report.status_extended = "NodeRestriction admission control plugin is set."

            node_restriction_plugin_set = False
            for container in pod.containers.values():
                if "--enable-admission-plugins" in container.command:
                    # Cut the flag value at the first space so a following
                    # "--other-flag" is not glued onto the last plugin name
                    # (splitting on "," alone kept that trailing text and
                    # broke the membership test).
                    admission_plugins = (
                        container.command.split("--enable-admission-plugins=")[1]
                        .split(" ")[0]
                        .split(",")
                    )
                    if "NodeRestriction" in admission_plugins:
                        node_restriction_plugin_set = True
                        break

            if not node_restriction_plugin_set:
                # `container` leaks from the loop above; assumes the pod has
                # at least one container.
                report.resource_id = container.name
                report.status = "FAIL"
                report.status_extended = f"NodeRestriction admission control plugin is not set in container {container.name}."

            findings.append(report)
        return findings
Setting a suitable --request-timeout value ensures the API server can handle requests efficiently without exhausting resources, especially in cases of slower connections or high-volume data requests.", + "Risk": "An inadequately set request timeout may lead to inefficient handling of API requests, either by timing out too quickly on slow connections or by allowing requests to consume excessive resources, leading to potential Denial-of-Service attacks.", + "RelatedUrl": "https://github.com/kubernetes/kubernetes/pull/51415", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to set the --request-timeout argument to an appropriate value based on your environment. Example: --request-timeout=300s", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Set the API server request timeout to a value that balances resource usage efficiency and the needs of your environment, considering connection speeds and data volumes.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Resource Management", + "Configuration Optimization" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "The default timeout is set to 60 seconds. Adjust according to the specific requirements and constraints of your Kubernetes environment." 
class apiserver_request_timeout_set(Check):
    """Ensure the API server sets the --request-timeout argument.

    For every API-server pod, PASS when some container passes
    --request-timeout; FAIL otherwise. Only the presence of the flag is
    validated — the value itself (e.g. "300s") is not parsed or range-checked.

    Returns:
        list[Check_Report_Kubernetes]: one report per API-server pod.
    """

    def execute(self) -> Check_Report_Kubernetes:
        findings = []
        for pod in apiserver_client.apiserver_pods:
            report = Check_Report_Kubernetes(self.metadata())
            report.namespace = pod.namespace
            report.resource_name = pod.name
            report.resource_id = pod.uid
            # Optimistic default; overwritten below if the flag is missing.
            report.status = "PASS"
            report.status_extended = (
                "Request timeout is set appropriately in the API server."
            )
            request_timeout_set = False
            for container in pod.containers.values():
                # TODO(review): also validate the timeout value once the
                # acceptable range is defined; presence-only for now.
                if "--request-timeout" in container.command:
                    request_timeout_set = True
                    break

            if not request_timeout_set:
                # `container` leaks from the loop above; assumes the pod has
                # at least one container.
                report.resource_id = container.name
                report.status = "FAIL"
                # Bug fix: this was a plain string, so the literal text
                # "{container.name}" was emitted — it needs the f prefix.
                report.status_extended = f"Request timeout is not set or not set appropriately in container {container.name}."

            findings.append(report)
        return findings
prowler/providers/kubernetes/services/apiserver/apiserver_security_context_deny_plugin/apiserver_security_context_deny_plugin.py index 895806ada1..3cf4d322eb 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_securitycontextdeny_plugin/apiserver_securitycontextdeny_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_security_context_deny_plugin/apiserver_security_context_deny_plugin.py @@ -4,7 +4,7 @@ ) -class apiserver_event_rate_limit(Check): +class apiserver_security_context_deny_plugin(Check): def execute(self) -> Check_Report_Kubernetes: findings = [] for pod in apiserver_client.apiserver_pods: diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.metadata.json new file mode 100644 index 0000000000..c576e2cab6 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_service_account_key_file_set", + "CheckTitle": "Ensure that the --service-account-key-file argument is set as appropriate", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Authentication", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "KubernetesAPIServer", + "Description": "This check ensures that the Kubernetes API server is configured with a --service-account-key-file argument, specifying the public key file for service 
account verification. A separate key pair for service accounts enhances security by enabling key rotation and ensuring service account tokens are verified with a specific public key.", + "Risk": "Without a specified service account public key file, the API server may use the private key from its TLS serving certificate, hindering the ability to rotate keys and increasing security risks.", + "RelatedUrl": "https://github.com/kubernetes/kubernetes/issues/24167", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to set --service-account-key-file to a valid public key file. Example: --service-account-key-file=", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Specify a separate public key file for verifying service account tokens in the API server.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Authentication", + "Key Management" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Ensure the public key used is securely managed and rotated in accordance with your organization's security policy." 
class apiserver_service_account_key_file_set(Check):
    """Ensure the API server sets the --service-account-key-file argument.

    For every API-server pod, PASS when some container passes
    --service-account-key-file (a dedicated public key for verifying
    service-account tokens); FAIL otherwise.

    Returns:
        list[Check_Report_Kubernetes]: one report per API-server pod.
    """

    def execute(self) -> Check_Report_Kubernetes:
        findings = []
        for pod in apiserver_client.apiserver_pods:
            report = Check_Report_Kubernetes(self.metadata())
            report.namespace = pod.namespace
            report.resource_name = pod.name
            report.resource_id = pod.uid
            # Optimistic default; overwritten below if the flag is missing.
            report.status = "PASS"
            report.status_extended = (
                "Service account key file is set appropriately in the API server."
            )

            service_account_key_file_set = False
            for container in pod.containers.values():
                if "--service-account-key-file" in container.command:
                    service_account_key_file_set = True
                    break

            if not service_account_key_file_set:
                # `container` leaks from the loop above; assumes the pod has
                # at least one container.
                report.resource_id = container.name
                report.status = "FAIL"
                # Bug fix: this was a plain string, so the literal text
                # "{container.name}" was emitted — it needs the f prefix.
                report.status_extended = (
                    f"Service account key file is not set in container {container.name}."
                )

            findings.append(report)
        return findings
class apiserver_service_account_lookup_true(Check):
    """Ensure the API server sets --service-account-lookup to true.

    For every API-server pod, PASS when some container passes the exact
    flag "--service-account-lookup=true"; FAIL otherwise. NOTE(review):
    this is a substring test, so a pod that simply omits the flag (where
    kube-apiserver defaults to true) is reported as FAIL — confirm this
    is the intended strictness.

    Returns:
        list[Check_Report_Kubernetes]: one report per API-server pod.
    """

    def execute(self) -> Check_Report_Kubernetes:
        findings = []
        for pod in apiserver_client.apiserver_pods:
            report = Check_Report_Kubernetes(self.metadata())
            report.namespace = pod.namespace
            report.resource_name = pod.name
            report.resource_id = pod.uid
            # Optimistic default; overwritten below if the flag is missing.
            report.status = "PASS"
            report.status_extended = (
                "Service account lookup is set to true in the API server."
            )

            service_account_lookup_set = False
            for container in pod.containers.values():
                if "--service-account-lookup=true" in container.command:
                    service_account_lookup_set = True
                    break

            if not service_account_lookup_set:
                # `container` leaks from the loop above; assumes the pod has
                # at least one container.
                report.resource_id = container.name
                report.status = "FAIL"
                # Bug fix: this was a plain string, so the literal text
                # "{container.name}" was emitted — it needs the f prefix.
                report.status_extended = f"Service account lookup is not set to true in container {container.name}."

            findings.append(report)
        return findings
b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_plugin/apiserver_service_account_plugin.py similarity index 97% rename from prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/apiserver_serviceaccount_plugin.py rename to prowler/providers/kubernetes/services/apiserver/apiserver_service_account_plugin/apiserver_service_account_plugin.py index 127f475a77..310bcd48f4 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_serviceaccount_plugin/apiserver_serviceaccount_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_plugin/apiserver_service_account_plugin.py @@ -4,7 +4,7 @@ ) -class apiserver_event_rate_limit(Check): +class apiserver_service_account_plugin(Check): def execute(self) -> Check_Report_Kubernetes: findings = [] for pod in apiserver_client.apiserver_pods: diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_strong_ciphers_only/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_strong_ciphers_only/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_strong_ciphers_only/apiserver_strong_ciphers_only.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_strong_ciphers_only/apiserver_strong_ciphers_only.metadata.json new file mode 100644 index 0000000000..a7352729f0 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_strong_ciphers_only/apiserver_strong_ciphers_only.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_strong_ciphers_only", + "CheckTitle": "Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "TLS Cipher Configuration", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "KubernetesAPIServer", + 
"Description": "This check ensures that the Kubernetes API server is configured to only use strong cryptographic ciphers, minimizing the risk of vulnerabilities associated with weaker ciphers. Strong ciphers enhance the security of TLS connections to the API server.", + "Risk": "Using weak ciphers can leave the API server vulnerable to cryptographic attacks, compromising the security of data in transit.", + "RelatedUrl": "https://github.com/ssllabs/research/wiki/SSL-and-TLS-Deployment-Best-Practices#23-use-secure-cipher-suites", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to include only strong cryptographic ciphers in the --tls-cipher-suites parameter. Example: --tls-cipher-suites=TLS_AES_128_GCM_SHA256,...", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Restrict the API server to only use strong cryptographic ciphers for enhanced security.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Data Security", + "Network Security" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "The choice of ciphers may need to be updated based on evolving security standards and client compatibility." 
class apiserver_strong_ciphers_only(Check):
    """Ensure the API server uses only strong cryptographic ciphers.

    For every API-server pod, PASS when some container sets
    --tls-cipher-suites and its command line mentions all three strong
    TLS 1.3 suites checked below; FAIL otherwise. NOTE(review): this is
    a substring test over the whole command, not a parse of the flag
    value, and it does not reject additional weak suites — confirm the
    intended semantics.

    Returns:
        list[Check_Report_Kubernetes]: one report per API-server pod.
    """

    def execute(self) -> Check_Report_Kubernetes:
        findings = []
        for pod in apiserver_client.apiserver_pods:
            report = Check_Report_Kubernetes(self.metadata())
            report.namespace = pod.namespace
            report.resource_name = pod.name
            report.resource_id = pod.uid
            # Optimistic default; overwritten below if the ciphers are missing.
            report.status = "PASS"
            report.status_extended = (
                "API Server is configured with strong cryptographic ciphers."
            )
            strong_ciphers_set = False
            for container in pod.containers.values():
                if "--tls-cipher-suites" in container.command and all(
                    cipher in container.command
                    for cipher in [
                        "TLS_AES_128_GCM_SHA256",
                        "TLS_AES_256_GCM_SHA384",
                        "TLS_CHACHA20_POLY1305_SHA256",
                    ]
                ):
                    strong_ciphers_set = True
                    break

            if not strong_ciphers_set:
                # `container` leaks from the loop above; assumes the pod has
                # at least one container.
                report.resource_id = container.name
                report.status = "FAIL"
                # Bug fix: this was a plain string, so the literal text
                # "{container.name}" was emitted — it needs the f prefix.
                report.status_extended = f"API Server is not using only strong cryptographic ciphers in container {container.name}."

            findings.append(report)
        return findings
class apiserver_tls_config(Check):
    """Ensure the API server is configured with a TLS certificate and key.

    For every API-server pod, PASS when some container passes both
    --tls-cert-file and --tls-private-key-file; FAIL otherwise.

    Returns:
        list[Check_Report_Kubernetes]: one report per API-server pod.
    """

    def execute(self) -> Check_Report_Kubernetes:
        findings = []
        for pod in apiserver_client.apiserver_pods:
            report = Check_Report_Kubernetes(self.metadata())
            report.namespace = pod.namespace
            report.resource_name = pod.name
            report.resource_id = pod.uid
            # Optimistic default; overwritten below if TLS flags are missing.
            report.status = "PASS"
            report.status_extended = (
                "TLS certificate and key are set appropriately in the API server."
            )
            tls_config_set = False
            for container in pod.containers.values():
                # Both the certificate and the private key flag must be present.
                if (
                    "--tls-cert-file" in container.command
                    and "--tls-private-key-file" in container.command
                ):
                    tls_config_set = True
                    break

            if not tls_config_set:
                # `container` leaks from the loop above; assumes the pod has
                # at least one container.
                report.resource_id = container.name
                report.status = "FAIL"
                # Bug fix: this was a plain string, so the literal text
                # "{container.name}" was emitted — it needs the f prefix.
                report.status_extended = f"TLS certificate and/or key are not set in container {container.name}."

            findings.append(report)
        return findings
apiserver_namespace_lifecycle_plugin/apiserver_namespace_lifecycle_plugin.py} (97%) rename prowler/providers/kubernetes/services/apiserver/{apiserver_no_alwaysadmit_plugin => apiserver_no_always_admit_plugin}/__init__.py (100%) create mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/apiserver_no_always_admit_plugin.metadata.json rename prowler/providers/kubernetes/services/apiserver/{apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.py => apiserver_no_always_admit_plugin/apiserver_no_always_admit_plugin.py} (96%) delete mode 100644 prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.metadata.json rename prowler/providers/kubernetes/services/apiserver/{apiserver_noderestriction_plugin => apiserver_node_restriction_plugin}/__init__.py (100%) rename prowler/providers/kubernetes/services/apiserver/{apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.metadata.json => apiserver_node_restriction_plugin/apiserver_node_restriction_plugin.metadata.json} (97%) rename prowler/providers/kubernetes/services/apiserver/{apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.py => apiserver_node_restriction_plugin/apiserver_node_restriction_plugin.py} (96%) diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_always_pull_images_plugin/__init__.py similarity index 100% rename from prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/__init__.py rename to prowler/providers/kubernetes/services/apiserver/apiserver_always_pull_images_plugin/__init__.py diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.metadata.json 
b/prowler/providers/kubernetes/services/apiserver/apiserver_always_pull_images_plugin/apiserver_always_pull_images_plugin.metadata.json similarity index 97% rename from prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.metadata.json rename to prowler/providers/kubernetes/services/apiserver/apiserver_always_pull_images_plugin/apiserver_always_pull_images_plugin.metadata.json index a4bc730671..bd4a9ef11d 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_always_pull_images_plugin/apiserver_always_pull_images_plugin.metadata.json @@ -1,6 +1,6 @@ { "Provider": "kubernetes", - "CheckID": "apiserver_alwayspullimages_plugin", + "CheckID": "apiserver_always_pull_images_plugin", "CheckTitle": "Ensure that the admission control plugin AlwaysPullImages is set", "CheckType": [ "Security", diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_always_pull_images_plugin/apiserver_always_pull_images_plugin.py similarity index 96% rename from prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.py rename to prowler/providers/kubernetes/services/apiserver/apiserver_always_pull_images_plugin/apiserver_always_pull_images_plugin.py index 00f1895a88..0e53a56dfa 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_alwayspullimages_plugin/apiserver_alwayspullimages_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_always_pull_images_plugin/apiserver_always_pull_images_plugin.py @@ -4,7 +4,7 @@ ) -class apiserver_alwayspullimages_plugin(Check): +class apiserver_always_pull_images_plugin(Check): def execute(self) -> 
Check_Report_Kubernetes: findings = [] for pod in apiserver_client.apiserver_pods: diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_namespace_lifecycle_plugin/__init__.py similarity index 100% rename from prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/__init__.py rename to prowler/providers/kubernetes/services/apiserver/apiserver_namespace_lifecycle_plugin/__init__.py diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_namespace_lifecycle_plugin/apiserver_namespace_lifecycle_plugin.metadata.json similarity index 96% rename from prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.metadata.json rename to prowler/providers/kubernetes/services/apiserver/apiserver_namespace_lifecycle_plugin/apiserver_namespace_lifecycle_plugin.metadata.json index e0ba41c8cd..65f07a4825 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_namespace_lifecycle_plugin/apiserver_namespace_lifecycle_plugin.metadata.json @@ -1,6 +1,6 @@ { "Provider": "kubernetes", - "CheckID": "apiserver_namespacelifecycle_plugin", + "CheckID": "apiserver_namespace_lifecycle_plugin", "CheckTitle": "Ensure that the admission control plugin NamespaceLifecycle is set", "CheckType": [ "Security", diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_namespace_lifecycle_plugin/apiserver_namespace_lifecycle_plugin.py similarity index 97% rename from 
prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.py rename to prowler/providers/kubernetes/services/apiserver/apiserver_namespace_lifecycle_plugin/apiserver_namespace_lifecycle_plugin.py index 1a7b5ccda6..83c6a6be57 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_namespacelifecycle_plugin/apiserver_namespacelifecycle_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_namespace_lifecycle_plugin/apiserver_namespace_lifecycle_plugin.py @@ -4,7 +4,7 @@ ) -class apiserver_namespacelifecycle_plugin(Check): +class apiserver_namespace_lifecycle_plugin(Check): def execute(self) -> Check_Report_Kubernetes: findings = [] for pod in apiserver_client.apiserver_pods: diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/__init__.py similarity index 100% rename from prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/__init__.py rename to prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/__init__.py diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/apiserver_no_always_admit_plugin.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/apiserver_no_always_admit_plugin.metadata.json new file mode 100644 index 0000000000..9a9d89fcc3 --- /dev/null +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/apiserver_no_always_admit_plugin.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "apiserver_no_always_admit_plugin", + "CheckTitle": "Ensure that the admission control plugin AlwaysAdmit is not set", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "apiserver", + "SubServiceName": "Admission Control", + 
"ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "KubernetesAPIServer", + "Description": "This check verifies that the Kubernetes API server is not configured with the AlwaysAdmit admission control plugin. The AlwaysAdmit plugin allows all requests without any filtering, which is a security risk and is deprecated.", + "Risk": "Enabling AlwaysAdmit permits all requests by default, bypassing other admission control checks, which can lead to unauthorized access.", + "RelatedUrl": "https://kubernetes.io/docs/admin/admission-controllers/#alwaysadmit", + "Remediation": { + "Code": { + "CLI": "Edit the kube-apiserver configuration to ensure that AlwaysAdmit is not included in the --enable-admission-plugins argument. Remove the plugin if it exists.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure the API server does not use the AlwaysAdmit admission control plugin to maintain proper security checks for all requests.", + "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" + } + }, + "Categories": [ + "Access Control", + "Deprecated Features" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "AlwaysAdmit is deprecated and should not be used. Ensure it is removed from the API server configuration." 
+} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/apiserver_no_always_admit_plugin.py similarity index 96% rename from prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.py rename to prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/apiserver_no_always_admit_plugin.py index d897b9838e..1abe6aba51 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/apiserver_no_always_admit_plugin.py @@ -4,7 +4,7 @@ ) -class apiserver_no_alwaysadmit_plugin(Check): +class apiserver_no_always_admit_plugin(Check): def execute(self) -> Check_Report_Kubernetes: findings = [] for pod in apiserver_client.apiserver_pods: diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.metadata.json deleted file mode 100644 index ee62ce0de1..0000000000 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_no_alwaysadmit_plugin/apiserver_no_alwaysadmit_plugin.metadata.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "Provider": "kubernetes", - "CheckID": "apiserver_event_rate_limit", - "CheckTitle": "Ensure that the admission control plugin EventRateLimit is set", - "CheckType": [ - "Security", - "Configuration" - ], - "ServiceName": "apiserver", - "SubServiceName": "Admission Control", - "ResourceIdTemplate": "", - "Severity": "medium", - "ResourceType": "KubernetesAPIServer", - "Description": "This check verifies if the Kubernetes API server is configured with the EventRateLimit admission 
control plugin. This plugin limits the rate of events accepted by the API Server, preventing potential DoS attacks by misbehaving workloads.", - "Risk": "Without EventRateLimit, the API server could be overwhelmed by a high number of events, leading to DoS and performance issues.", - "RelatedUrl": "https://kubernetes.io/docs/admin/admission-controllers/#eventratelimit", - "Remediation": { - "Code": { - "CLI": "Edit the kube-apiserver configuration to include EventRateLimit in the --enable-admission-plugins argument and specify a configuration file. Example: --enable-admission-plugins=...,EventRateLimit,... --admission-control-config-file=/path/to/configuration/file", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Configure EventRateLimit as an admission control plugin for the API server to manage the rate of incoming events effectively.", - "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" - } - }, - "Categories": [ - "Resource Management", - "Best Practices" - ], - "DependsOn": [], - "RelatedTo": [], - "Notes": "Tuning EventRateLimit requires careful consideration of the specific requirements of your environment." 
-} diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/__init__.py b/prowler/providers/kubernetes/services/apiserver/apiserver_node_restriction_plugin/__init__.py similarity index 100% rename from prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/__init__.py rename to prowler/providers/kubernetes/services/apiserver/apiserver_node_restriction_plugin/__init__.py diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_node_restriction_plugin/apiserver_node_restriction_plugin.metadata.json similarity index 97% rename from prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.metadata.json rename to prowler/providers/kubernetes/services/apiserver/apiserver_node_restriction_plugin/apiserver_node_restriction_plugin.metadata.json index b694fd25ed..5eea56bb09 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_node_restriction_plugin/apiserver_node_restriction_plugin.metadata.json @@ -1,6 +1,6 @@ { "Provider": "kubernetes", - "CheckID": "apiserver_noderestriction_plugin", + "CheckID": "apiserver_node_restriction_plugin", "CheckTitle": "Ensure that the admission control plugin NodeRestriction is set", "CheckType": [ "Security", diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_node_restriction_plugin/apiserver_node_restriction_plugin.py similarity index 96% rename from prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.py rename to 
prowler/providers/kubernetes/services/apiserver/apiserver_node_restriction_plugin/apiserver_node_restriction_plugin.py index a11c936038..25d527f7a5 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_noderestriction_plugin/apiserver_noderestriction_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_node_restriction_plugin/apiserver_node_restriction_plugin.py @@ -4,7 +4,7 @@ ) -class apiserver_noderestriction_plugin(Check): +class apiserver_node_restriction_plugin(Check): def execute(self) -> Check_Report_Kubernetes: findings = [] for pod in apiserver_client.apiserver_pods: From 7bb9c5e568f76ebc81ace5d801a75757e075986b Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Wed, 10 Jan 2024 16:35:40 +0100 Subject: [PATCH 07/21] improve checks logic --- .../apiserver_always_pull_images_plugin.py | 19 ++++++------ .../apiserver_anonymous_requests.py | 10 +++++-- .../apiserver_audit_log_maxage_set.py | 19 +++++------- .../apiserver_audit_log_maxbackup_set.py | 21 +++++--------- .../apiserver_audit_log_maxsize_set.py | 19 +++++------- .../apiserver_audit_log_path_set.py | 11 ++++--- .../apiserver_auth_mode_include_node.py | 17 ++++++----- .../apiserver_auth_mode_include_rbac.py | 17 ++++++----- .../apiserver_auth_mode_not_always_allow.py | 14 ++++----- .../apiserver_client_ca_file_set.py | 11 ++----- .../apiserver_deny_service_external_ips.py | 19 +++++------- .../apiserver_disable_profiling.py | 13 ++++----- ...piserver_encryption_provider_config_set.py | 9 +++--- .../apiserver_etcd_cafile_set.py | 9 ++---- .../apiserver_etcd_tls_config.py | 14 ++++----- .../apiserver_event_rate_limit.py | 19 ++++++------ .../apiserver_kubelet_cert_auth.py | 10 +++---- .../apiserver_kubelet_tls_auth.metadata.json | 22 +++++++------- .../apiserver_kubelet_tls_auth.py | 15 ++++------ .../apiserver_namespace_lifecycle_plugin.py | 24 ++++++--------- .../apiserver_no_always_admit_plugin.py | 17 +++++------ .../apiserver_no_token_auth_file.py | 11 ++++--- 
.../apiserver_node_restriction_plugin.py | 19 +++++------- .../apiserver_request_timeout_set.py | 10 ++----- .../apiserver_security_context_deny_plugin.py | 26 +++++++++-------- ...service_account_key_file_set.metadata.json | 2 +- .../apiserver_service_account_key_file_set.py | 7 ++--- .../apiserver_service_account_lookup_true.py | 9 +++--- .../apiserver_service_account_plugin.py | 26 +++++++---------- .../apiserver_strong_ciphers_only.py | 29 +++++++++---------- .../apiserver_tls_config.py | 14 ++++----- .../controllermanager_garbage_collection.py | 9 ++---- .../etcd_tls_encryption.py | 15 ++++++---- .../scheduler_profiling.py | 15 ++++++---- 34 files changed, 238 insertions(+), 283 deletions(-) diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_always_pull_images_plugin/apiserver_always_pull_images_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_always_pull_images_plugin/apiserver_always_pull_images_plugin.py index 0e53a56dfa..4cff8de256 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_always_pull_images_plugin/apiserver_always_pull_images_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_always_pull_images_plugin/apiserver_always_pull_images_plugin.py @@ -13,19 +13,18 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = "AlwaysPullImages admission control plugin is set." + report.status_extended = ( + f"AlwaysPullImages admission control plugin is set in pod {pod.name}." 
+ ) plugin_set = False for container in pod.containers.values(): - if "--enable-admission-plugins" in container.command: - admission_plugins = container.command.split( - "--enable-admission-plugins=" - )[1].split(",") - if "AlwaysPullImages" in admission_plugins: - plugin_set = True - break + for command in container.command: + if command.startswith("--enable-admission-plugins"): + if "AlwaysPullImages" in command: + plugin_set = True + break if not plugin_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = "AlwaysPullImages admission control plugin is not set in container {container.name}." + report.status_extended = f"AlwaysPullImages admission control plugin is not set in pod {pod.name}." findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_anonymous_requests/apiserver_anonymous_requests.py b/prowler/providers/kubernetes/services/apiserver/apiserver_anonymous_requests/apiserver_anonymous_requests.py index 65b3893eb2..4bea8922fb 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_anonymous_requests/apiserver_anonymous_requests.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_anonymous_requests/apiserver_anonymous_requests.py @@ -13,11 +13,15 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = "API Server does not have anonymous-auth enabled." + report.status_extended = ( + f"API Server does not have anonymous-auth enabled in pod {pod.name}." + ) for container in pod.containers.values(): if "--anonymous-auth=true" in container.command: - report.resource_id = container.name + report.status = "FAIL" - report.status_extended = f"API Server has anonymous-auth enabled in container {container.name}." + report.status_extended = ( + f"API Server has anonymous-auth enabled in pod {pod.name}." 
+ ) findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/apiserver_audit_log_maxage_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/apiserver_audit_log_maxage_set.py index b3bde54377..26f0bac80b 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/apiserver_audit_log_maxage_set.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxage_set/apiserver_audit_log_maxage_set.py @@ -13,25 +13,20 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = ( - "Audit log max age is set appropriately in the API server." - ) + report.status_extended = f"Audit log max age is set appropriately in the API server in pod {pod.name}." audit_log_maxage_set = False for container in pod.containers.values(): # Check if "--audit-log-maxage" is set to 30 or as appropriate - if "--audit-log-maxage" in container.command: - maxage_value = int( - container.command.split("--audit-log-maxage=")[1].split(" ")[0] - ) - if maxage_value >= 30: - audit_log_maxage_set = True - break + for command in container.command: + if command.startswith("--audit-log-maxage"): + if int(command.split("=")[1]) >= 30: + audit_log_maxage_set = True + break if not audit_log_maxage_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = "Audit log max age is not set to 30 or as appropriate in container {container.name}." + report.status_extended = "Audit log max age is not set to 30 or as appropriate in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/apiserver_audit_log_maxbackup_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/apiserver_audit_log_maxbackup_set.py index 61d764cd9a..6850887987 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/apiserver_audit_log_maxbackup_set.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxbackup_set/apiserver_audit_log_maxbackup_set.py @@ -13,26 +13,19 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = ( - "Audit log max backup is set appropriately in the API server." - ) + report.status_extended = f"Audit log max backup is set appropriately in the API server in pod {pod.name}." audit_log_maxbackup_set = False for container in pod.containers.values(): # Check if "--audit-log-maxbackup" is set to 10 or as appropriate - if "--audit-log-maxbackup" in container.command: - maxbackup_value = int( - container.command.split("--audit-log-maxbackup=")[1].split(" ")[ - 0 - ] - ) - if maxbackup_value >= 10: - audit_log_maxbackup_set = True - break + for command in container.command: + if command.startswith("--audit-log-maxbackup"): + if int(command.split("=")[1]) >= 10: + audit_log_maxbackup_set = True + break if not audit_log_maxbackup_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = f"Audit log max backup is not set to 10 or as appropriate in container {container.name}." + report.status_extended = f"Audit log max backup is not set to 10 or as appropriate in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/apiserver_audit_log_maxsize_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/apiserver_audit_log_maxsize_set.py index 618c63b55a..4c6b45f1c3 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/apiserver_audit_log_maxsize_set.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_maxsize_set/apiserver_audit_log_maxsize_set.py @@ -13,25 +13,20 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = ( - "Audit log max size is set appropriately in the API server." - ) + report.status_extended = f"Audit log max size is set appropriately in the API server in pod {pod.name}." audit_log_maxsize_set = False for container in pod.containers.values(): # Check if "--audit-log-maxsize" is set to 100 MB or as appropriate - if "--audit-log-maxsize" in container.command: - maxsize_value = int( - container.command.split("--audit-log-maxsize=")[1].split(" ")[0] - ) - if maxsize_value >= 100: - audit_log_maxsize_set = True - break + for command in container.command: + if command.startswith("--audit-log-maxsize"): + if int(command.split("=")[1]) >= 100: + audit_log_maxsize_set = True + break if not audit_log_maxsize_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = f"Audit log max size is not set to 100 MB or as appropriate in container {container.name}." + report.status_extended = f"Audit log max size is not set to 100 MB or as appropriate in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/apiserver_audit_log_path_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/apiserver_audit_log_path_set.py index 3f0de4acd1..c083b0fe87 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/apiserver_audit_log_path_set.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_audit_log_path_set/apiserver_audit_log_path_set.py @@ -13,21 +13,20 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = "Audit log path is set in the API server." + report.status_extended = ( + f"Audit log path is set in the API server in pod {pod.name}." + ) audit_log_path_set = False for container in pod.containers.values(): # Check if "--audit-log-path" is set - if "--audit-log-path" in container.command: + if "--audit-log-path" in str(container.command): audit_log_path_set = True break if not audit_log_path_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = ( - "Audit log path is not set in container {container.name}." - ) + report.status_extended = f"Audit log path is not set in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.py b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.py index 966cb6af7c..8242aeca72 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.py @@ -13,14 +13,15 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = "API Server authorization mode includes Node." + report.status_extended = ( + f"API Server authorization mode includes Node in pod {pod.name}." + ) for container in pod.containers.values(): - if ( - "--authorization-mode" in container.command - and "Node" not in container.command - ): - report.resource_id = container.name - report.status = "FAIL" - report.status_extended = f"API Server authorization mode does not include Node in container {container.name}." + for command in container.command: + if command.startswith("--authorization-mode"): + if "Node" not in (command.split("=")[1]): + + report.status = "FAIL" + report.status_extended = f"API Server authorization mode does not include Node in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.py b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.py index c2ab402e00..13d4c285d8 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.py @@ -13,14 +13,15 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = "API Server authorization mode includes RBAC." + report.status_extended = ( + f"API Server authorization mode includes RBAC in pod {pod.name}." + ) for container in pod.containers.values(): - if ( - "--authorization-mode" in container.command - and "RBAC" not in container.command - ): - report.resource_id = container.name - report.status = "FAIL" - report.status_extended = "API Server authorization mode does not include RBAC in container {container.name}." + for command in container.command: + if command.startswith("--authorization-mode"): + if "RBAC" not in (command.split("=")[1]): + + report.status = "FAIL" + report.status_extended = "API Server authorization mode does not include RBAC in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/apiserver_auth_mode_not_always_allow.py b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/apiserver_auth_mode_not_always_allow.py index 3a934b63c8..2b02ffa8bb 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/apiserver_auth_mode_not_always_allow.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_not_always_allow/apiserver_auth_mode_not_always_allow.py @@ -13,13 +13,13 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = ( - "API Server authorization mode is not set to AlwaysAllow." - ) + report.status_extended = f"API Server authorization mode is not set to AlwaysAllow in pod {pod.name}." for container in pod.containers.values(): - if "--authorization-mode=AlwaysAllow" in container.command: - report.resource_id = container.name - report.status = "FAIL" - report.status_extended = f"API Server authorization mode is set to AlwaysAllow in container {container.name}." + for command in container.command: + if command.startswith("--authorization-mode"): + if "AlwaysAllow" in (command.split("=")[1]): + + report.status = "FAIL" + report.status_extended = f"API Server authorization mode is set to AlwaysAllow in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/apiserver_client_ca_file_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/apiserver_client_ca_file_set.py index 7cf0340ccd..778adbdc11 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/apiserver_client_ca_file_set.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_client_ca_file_set/apiserver_client_ca_file_set.py @@ -13,22 +13,17 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = ( - "Client CA file is set appropriately in the API server." - ) + report.status_extended = f"Client CA file is set appropriately in the API server in pod {pod.name}." client_ca_file_set = False for container in pod.containers.values(): # Check if "--client-ca-file" is set - if "--client-ca-file" in container.command: + if "--client-ca-file" in str(container.command): client_ca_file_set = True break if not client_ca_file_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = ( - "Client CA file is not set in container {container.name}." - ) + report.status_extended = f"Client CA file is not set in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.py b/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.py index b2c0ba4612..88479d4f87 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_deny_service_external_ips/apiserver_deny_service_external_ips.py @@ -12,17 +12,14 @@ def execute(self) -> Check_Report_Kubernetes: report.namespace = pod.namespace report.resource_name = pod.name report.resource_id = pod.uid - report.status = "PASS" - report.status_extended = ( - "API Server has DenyServiceExternalIPs admission controller enabled." - ) + report.status = "FAIL" + report.status_extended = f"API Server does not have DenyServiceExternalIPs enabled in container in pod {pod.name}." for container in pod.containers.values(): - if ( - "--disable-admission-plugins=DenyServiceExternalIPs" - in container.command - ): - report.resource_id = container.name - report.status = "FAIL" - report.status_extended = f"API Server does not have DenyServiceExternalIPs enabled in container {container.name}." + for command in container.command: + if command.startswith("--disable-admission-plugins"): + if "DenyServiceExternalIPs" in (command.split("=")[1]): + + report.status = "PASS" + report.status_extended = f"API Server has DenyServiceExternalIPs admission controller enabled in {container.name} within pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/apiserver_disable_profiling.py b/prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/apiserver_disable_profiling.py index ba5f77e8ef..311b2dc30b 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/apiserver_disable_profiling.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_disable_profiling/apiserver_disable_profiling.py @@ -13,19 +13,16 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = "Profiling is disabled in the API server." - profiling_enabled = False + report.status_extended = f"Profiling is disabled in pod {pod.name}." + profiling_enabled = True for container in pod.containers.values(): # Check if "--profiling" is set to false - if "--profiling=false" not in container.command: - profiling_enabled = True + if "--profiling=false" in container.command: + profiling_enabled = False break if profiling_enabled: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = ( - f"Profiling is enabled in container {container.name}." - ) + report.status_extended = f"Profiling is enabled in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/apiserver_encryption_provider_config_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/apiserver_encryption_provider_config_set.py index f19788f993..ad05f4a7c9 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/apiserver_encryption_provider_config_set.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_encryption_provider_config_set/apiserver_encryption_provider_config_set.py @@ -14,20 +14,21 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_id = pod.uid report.status = "PASS" report.status_extended = ( - "Encryption provider config is set appropriately in the API server." + f"Encryption provider config is set appropriately in pod {pod.name}." ) encryption_provider_config_set = False for container in pod.containers.values(): # Check if "--encryption-provider-config" is set - if "--encryption-provider-config" in container.command: + if "--encryption-provider-config" in str(container.command): encryption_provider_config_set = True break if not encryption_provider_config_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = f"Encryption provider config is not set in container {container.name}." + report.status_extended = ( + f"Encryption provider config is not set in pod {pod.name}." 
+ ) findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/apiserver_etcd_cafile_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/apiserver_etcd_cafile_set.py index a1099c30af..af603b305d 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/apiserver_etcd_cafile_set.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_cafile_set/apiserver_etcd_cafile_set.py @@ -14,21 +14,18 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_id = pod.uid report.status = "PASS" report.status_extended = ( - "etcd CA file is set appropriately in the API server." + f"etcd CA file is set appropriately in pod {pod.name}." ) etcd_cafile_set = False for container in pod.containers.values(): # Check if "--etcd-cafile" is set - if "--etcd-cafile" in container.command: + if "--etcd-cafile" in str(container.command): etcd_cafile_set = True break if not etcd_cafile_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = ( - f"etcd CA file is not set in container {container.name}." - ) + report.status_extended = f"etcd CA file is not set in pod {pod.name}." findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/apiserver_etcd_tls_config.py b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/apiserver_etcd_tls_config.py index 44a33e90fd..81af504a4a 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/apiserver_etcd_tls_config.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_etcd_tls_config/apiserver_etcd_tls_config.py @@ -14,22 +14,22 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_id = pod.uid report.status = "PASS" report.status_extended = ( - "TLS configuration for etcd is set appropriately in the API server." 
+ f"TLS configuration for etcd is set appropriately in pod {pod.name}." ) etcd_tls_config_set = False for container in pod.containers.values(): # Check if "--etcd-certfile" and "--etcd-keyfile" are set - if ( - "--etcd-certfile" in container.command - and "--etcd-keyfile" in container.command - ): + if "--etcd-certfile" in str( + container.command + ) and "--etcd-keyfile" in str(container.command): etcd_tls_config_set = True break if not etcd_tls_config_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = f"TLS configuration for etcd is not set in container {container.name}." + report.status_extended = ( + f"TLS configuration for etcd is not set in pod {pod.name}." + ) findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.py b/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.py index 09a8c512cd..50f1f8da38 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.py @@ -13,20 +13,19 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = "EventRateLimit admission control plugin is set." + report.status_extended = ( + f"EventRateLimit admission control plugin is set in pod {pod.name}." 
+ ) plugin_set = False for container in pod.containers.values(): - if "--enable-admission-plugins" in container.command: - admission_plugins = container.command.split( - "--enable-admission-plugins=" - )[1].split(",") - if "EventRateLimit" not in admission_plugins: - plugin_set = True - break + for command in container.command: + if command.startswith("--enable-admission-plugins"): + if "EventRateLimit" not in (command.split("=")[1]): + plugin_set = True + break if not plugin_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = f"EventRateLimit admission control plugin is not set in container {container.name}." + report.status_extended = f"EventRateLimit admission control plugin is not set in pod {pod.name}." findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_cert_auth/apiserver_kubelet_cert_auth.py b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_cert_auth/apiserver_kubelet_cert_auth.py index e5cbf89e01..908199d023 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_cert_auth/apiserver_kubelet_cert_auth.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_cert_auth/apiserver_kubelet_cert_auth.py @@ -13,13 +13,11 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = ( - "API Server has appropriate kubelet certificate authority configured." - ) + report.status_extended = f"API Server has appropriate kubelet certificate authority configured in pod {pod.name}." 
for container in pod.containers.values(): - if "--kubelet-certificate-authority" not in container.command: - report.resource_id = container.name + if "--kubelet-certificate-authority" not in str(container.command): + report.status = "FAIL" - report.status_extended = f"API Server is missing kubelet certificate authority configuration in container {container.name}." + report.status_extended = f"API Server is missing kubelet certificate authority configuration in pod {pod.name}." findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.metadata.json index aef64b3bc7..38ee53c84d 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.metadata.json @@ -1,36 +1,36 @@ { "Provider": "kubernetes", - "CheckID": "apiserver_no_token_auth_file", - "CheckTitle": "Ensure that the --token-auth-file parameter is not set", + "CheckID": "apiserver_kubelet_tls_auth", + "CheckTitle": "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", "CheckType": [ "Security", "Configuration" ], "ServiceName": "apiserver", - "SubServiceName": "Authentication", + "SubServiceName": "TLS Authentication", "ResourceIdTemplate": "", "Severity": "high", "ResourceType": "KubernetesAPIServer", - "Description": "This check ensures that the Kubernetes API server is not using static token-based authentication, which is less secure. 
Static tokens are stored in clear-text and lack features like revocation or rotation without restarting the API server.", - "Risk": "Using static token-based authentication exposes the cluster to security risks due to the static nature of the tokens, their clear-text storage, and the inability to revoke or rotate them easily.", - "RelatedUrl": "https://kubernetes.io/docs/admin/authentication/#static-token-file", + "Description": "This check ensures that the Kubernetes API server is set up with certificate-based authentication to the kubelet. This setup requires the --kubelet-client-certificate and --kubelet-client-key arguments in the kube-apiserver configuration to be set, ensuring secure communication between the API server and kubelets.", + "Risk": "Without certificate-based authentication to kubelets, requests from the apiserver are treated as anonymous, which could lead to unauthorized access and manipulation of node resources.", + "RelatedUrl": "https://kubernetes.io/docs/admin/kubelet-authentication-authorization/", "Remediation": { "Code": { - "CLI": "Remove the --token-auth-file parameter from the kube-apiserver configuration. Edit /etc/kubernetes/manifests/kube-apiserver.yaml on the master node and remove the --token-auth-file= parameter.", + "CLI": "Set the --kubelet-client-certificate and --kubelet-client-key arguments in the kube-apiserver configuration. Example: --kubelet-client-certificate=/path/to/client-certificate-file --kubelet-client-key=/path/to/client-key-file", "NativeIaC": "", "Other": "", "Terraform": "" }, "Recommendation": { - "Text": "Replace token-based authentication with more secure mechanisms like client certificate authentication. 
Ensure the --token-auth-file argument is not used in the API server configuration.", + "Text": "Enable TLS authentication between the apiserver and kubelets by specifying the client certificate and key in the kube-apiserver configuration.", "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" } }, "Categories": [ - "Access Control", - "Security Best Practices" + "Cluster Security", + "Communication Security" ], "DependsOn": [], "RelatedTo": [], - "Notes": "By default, the --token-auth-file argument is not set in the kube-apiserver. Ensure it remains unset or is removed if currently in use." + "Notes": "By default, the kube-apiserver does not authenticate to kubelets using certificates. Enabling this increases the security posture of the cluster." } diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.py b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.py index b0a84cfafd..8a1217f388 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_kubelet_tls_auth/apiserver_kubelet_tls_auth.py @@ -13,16 +13,13 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = ( - "API Server has appropriate kubelet TLS authentication configured." - ) + report.status_extended = f"API Server has appropriate kubelet TLS authentication configured in pod {pod.name}." 
for container in pod.containers.values(): - if ( - "--kubelet-client-certificate" not in container.command - or "--kubelet-client-key" not in container.command - ): - report.resource_id = container.name + if "--kubelet-client-certificate" not in str( + container.command + ) and "--kubelet-client-key" not in str(container.command): + report.status = "FAIL" - report.status_extended = f"API Server is missing kubelet TLS authentication arguments in container {container.name}." + report.status_extended = f"API Server is missing kubelet TLS authentication arguments in pod {pod.name}." findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_namespace_lifecycle_plugin/apiserver_namespace_lifecycle_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_namespace_lifecycle_plugin/apiserver_namespace_lifecycle_plugin.py index 83c6a6be57..580f5fde0f 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_namespace_lifecycle_plugin/apiserver_namespace_lifecycle_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_namespace_lifecycle_plugin/apiserver_namespace_lifecycle_plugin.py @@ -14,30 +14,24 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_id = pod.uid report.status = "PASS" report.status_extended = ( - "NamespaceLifecycle admission control plugin is set." + f"NamespaceLifecycle admission control plugin is set in pod {pod.name}." 
) namespace_lifecycle_plugin_set = False for container in pod.containers.values(): # Check if "--enable-admission-plugins" includes "NamespaceLifecycle" # and "--disable-admission-plugins" does not include "NamespaceLifecycle" - if "--enable-admission-plugins" in container.command: - admission_plugins = container.command.split( - "--enable-admission-plugins=" - )[1].split(",") - if "NamespaceLifecycle" in admission_plugins: - namespace_lifecycle_plugin_set = True - if "--disable-admission-plugins" in container.command: - disabled_plugins = container.command.split( - "--disable-admission-plugins=" - )[1].split(",") - if "NamespaceLifecycle" in disabled_plugins: - namespace_lifecycle_plugin_set = False + for command in container.command: + if command.startswith("--enable-admission-plugins"): + if "NamespaceLifecycle" in (command.split("=")[1]): + namespace_lifecycle_plugin_set = True + elif command.startswith("--disable-admission-plugins"): + if "NamespaceLifecycle" in (command.split("=")[1]): + namespace_lifecycle_plugin_set = False if not namespace_lifecycle_plugin_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = f"NamespaceLifecycle admission control plugin is not set in container {container.name}." + report.status_extended = f"NamespaceLifecycle admission control plugin is not set in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/apiserver_no_always_admit_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/apiserver_no_always_admit_plugin.py index 1abe6aba51..e276c07dd4 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/apiserver_no_always_admit_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_no_always_admit_plugin/apiserver_no_always_admit_plugin.py @@ -13,15 +13,14 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = "AlwaysAdmit admission control plugin is not set." + report.status_extended = ( + f"AlwaysAdmit admission control plugin is not set in pod {pod.name}." + ) for container in pod.containers.values(): - if "--enable-admission-plugins" in container.command: - admission_plugins = container.command.split( - "--enable-admission-plugins=" - )[1].split(",") - if "AlwaysAdmit" in admission_plugins: - report.resource_id = container.name - report.status = "FAIL" - report.status_extended = "AlwaysAdmit admission control plugin is set in container {container.name}." + for command in container.command: + if command.startswith("--enable-admission-plugins"): + if "AlwaysAdmit" in (command.split("=")[1]): + report.status = "FAIL" + report.status_extended = f"AlwaysAdmit admission control plugin is set in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.py b/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.py index 6690dbeebd..92facb3e11 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_no_token_auth_file/apiserver_no_token_auth_file.py @@ -13,11 +13,14 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = "API Server does not have token-auth-file enabled." + report.status_extended = ( + f"API Server does not have token-auth-file enabled in pod {pod.name}." + ) for container in pod.containers.values(): - if "--token-auth-file" in container.command: - report.resource_id = container.name + if "--token-auth-file" in str(container.command): report.status = "FAIL" - report.status_extended = f"API Server has token-auth-file enabled in container {container.name}." + report.status_extended = ( + f"API Server has token-auth-file enabled in pod {pod.name}." 
+ ) findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_node_restriction_plugin/apiserver_node_restriction_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_node_restriction_plugin/apiserver_node_restriction_plugin.py index 25d527f7a5..c442a3237f 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_node_restriction_plugin/apiserver_node_restriction_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_node_restriction_plugin/apiserver_node_restriction_plugin.py @@ -13,23 +13,20 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = "NodeRestriction admission control plugin is set." - + report.status_extended = ( + f"NodeRestriction admission control plugin is set in pod {pod.name}." + ) node_restriction_plugin_set = False for container in pod.containers.values(): # Check if "--enable-admission-plugins" includes "NodeRestriction" - if "--enable-admission-plugins" in container.command: - admission_plugins = container.command.split( - "--enable-admission-plugins=" - )[1].split(",") - if "NodeRestriction" in admission_plugins: - node_restriction_plugin_set = True - break + for command in container.command: + if command.startswith("--enable-admission-plugins"): + if "NodeRestriction" in (command.split("=")[1]): + node_restriction_plugin_set = True if not node_restriction_plugin_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = f"NodeRestriction admission control plugin is not set in container {container.name}." + report.status_extended = f"NodeRestriction admission control plugin is not set in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_request_timeout_set/apiserver_request_timeout_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_request_timeout_set/apiserver_request_timeout_set.py index de480cdb86..33a964176d 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_request_timeout_set/apiserver_request_timeout_set.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_request_timeout_set/apiserver_request_timeout_set.py @@ -14,23 +14,19 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_id = pod.uid report.status = "PASS" report.status_extended = ( - "Request timeout is set appropriately in the API server." + f"Request timeout is set appropriately in pod {pod.name}." ) request_timeout_set = False for container in pod.containers.values(): # Check if "--request-timeout" is set to an appropriate value - if "--request-timeout" in container.command: - # timeout_value = container.command.split("--request-timeout=")[ - # 1 - # ].split(" ")[0] + if "--request-timeout" in str(container.command): # Assuming the value is valid, e.g., '300s' or '1m' request_timeout_set = True break if not request_timeout_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = "Request timeout is not set or not set appropriately in container {container.name}." + report.status_extended = f"Request timeout is not set or not set appropriately in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_security_context_deny_plugin/apiserver_security_context_deny_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_security_context_deny_plugin/apiserver_security_context_deny_plugin.py index 3cf4d322eb..2dbb91e684 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_security_context_deny_plugin/apiserver_security_context_deny_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_security_context_deny_plugin/apiserver_security_context_deny_plugin.py @@ -12,25 +12,27 @@ def execute(self) -> Check_Report_Kubernetes: report.namespace = pod.namespace report.resource_name = pod.name report.resource_id = pod.uid - report.status = "PASS" - report.status_extended = "SecurityContextDeny admission control plugin is set or PodSecurityPolicy is in use." security_context_deny_set = False pod_security_policy_set = False for container in pod.containers.values(): - if "--enable-admission-plugins" in container.command: - admission_plugins = container.command.split( - "--enable-admission-plugins=" - )[1].split(",") - security_context_deny_set = ( - "SecurityContextDeny" in admission_plugins - ) - pod_security_policy_set = "PodSecurityPolicy" in admission_plugins + for command in container.command: + if command.startswith("--enable-admission-plugins"): + if "SecurityContextDeny" in (command.split("=")[1]): + security_context_deny_set = True + if "PodSecurityPolicy" in (command.split("=")[1]): + pod_security_policy_set = True - if security_context_deny_set or pod_security_policy_set: + if pod_security_policy_set: report.status = "PASS" + report.status_extended = ( + f"PodSecurityPolicy is in use in pod {pod.name}." + ) + elif security_context_deny_set: + report.status = "PASS" + report.status_extended = f"SecurityContextDeny admission control plugin is set in pod {pod.name}." 
else: report.status = "FAIL" - report.status_extended = "Neither SecurityContextDeny nor PodSecurityPolicy admission control plugins are set in container {container.name}." + report.status_extended = f"Neither SecurityContextDeny nor PodSecurityPolicy admission control plugins are set in pod {pod.name}." findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.metadata.json index c576e2cab6..0542eb3c53 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.metadata.json @@ -22,7 +22,7 @@ "Terraform": "" }, "Recommendation": { - "Text": "Specify a separate public key file for verifying service account tokens in the API server.", + "Text": "Specify a separate public key file for verifying service account tokens in the API server.", "Url": "https://kubernetes.io/docs/admin/kube-apiserver/" } }, diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.py b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.py index 2a00f9966d..ebdc236d80 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_key_file_set/apiserver_service_account_key_file_set.py @@ -14,21 +14,20 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_id = pod.uid report.status = "PASS"
report.status_extended = ( - "Service account key file is set appropriately in the API server." + f"Service account key file is set appropriately in pod {pod.name}." ) service_account_key_file_set = False for container in pod.containers.values(): # Check if "--service-account-key-file" is set - if "--service-account-key-file" in container.command: + if "--service-account-key-file" in str(container.command): service_account_key_file_set = True break if not service_account_key_file_set: - report.resource_id = container.name report.status = "FAIL" report.status_extended = ( - "Service account key file is not set in container {container.name}." + f"Service account key file is not set in pod {pod.name}." ) findings.append(report) diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_lookup_true/apiserver_service_account_lookup_true.py b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_lookup_true/apiserver_service_account_lookup_true.py index da6439691c..545d5df27e 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_lookup_true/apiserver_service_account_lookup_true.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_lookup_true/apiserver_service_account_lookup_true.py @@ -14,20 +14,21 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_id = pod.uid report.status = "PASS" report.status_extended = ( - "Service account lookup is set to true in the API server." + f"Service account lookup is set to true in pod {pod.name}." 
) service_account_lookup_set = False for container in pod.containers.values(): # Check if "--service-account-lookup" is set to true - if "--service-account-lookup=true" in container.command: + if "--service-account-lookup=true" in str(container.command): service_account_lookup_set = True break if not service_account_lookup_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = "Service account lookup is not set to true in container {container.name}." + report.status_extended = ( + f"Service account lookup is not set to true in pod {pod.name}." + ) findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_plugin/apiserver_service_account_plugin.py b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_plugin/apiserver_service_account_plugin.py index 310bcd48f4..a441be06c1 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_plugin/apiserver_service_account_plugin.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_plugin/apiserver_service_account_plugin.py @@ -13,29 +13,25 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = "ServiceAccount admission control plugin is set." + report.status_extended = ( + f"ServiceAccount admission control plugin is set in pod {pod.name}." 
+ ) service_account_plugin_set = False for container in pod.containers.values(): # Check if "--enable-admission-plugins" includes "ServiceAccount" # and "--disable-admission-plugins" does not include "ServiceAccount" - if "--enable-admission-plugins" in container.command: - admission_plugins = container.command.split( - "--enable-admission-plugins=" - )[1].split(",") - if "ServiceAccount" in admission_plugins: - service_account_plugin_set = True - if "--disable-admission-plugins" in container.command: - disabled_plugins = container.command.split( - "--disable-admission-plugins=" - )[1].split(",") - if "ServiceAccount" in disabled_plugins: - service_account_plugin_set = False + for command in container.command: + if command.startswith("--enable-admission-plugins"): + if "ServiceAccount" in (command.split("=")[1]): + service_account_plugin_set = True + elif command.startswith("--disable-admission-plugins"): + if "ServiceAccount" in (command.split("=")[1]): + service_account_plugin_set = False if not service_account_plugin_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = "ServiceAccount admission control plugin is not set in container {container.name}." + report.status_extended = f"ServiceAccount admission control plugin is not set in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_strong_ciphers_only/apiserver_strong_ciphers_only.py b/prowler/providers/kubernetes/services/apiserver/apiserver_strong_ciphers_only/apiserver_strong_ciphers_only.py index e32bca2ec4..7af3fd2ba4 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_strong_ciphers_only/apiserver_strong_ciphers_only.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_strong_ciphers_only/apiserver_strong_ciphers_only.py @@ -13,27 +13,24 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = ( - "API Server is configured with strong cryptographic ciphers." - ) - strong_ciphers_set = False + report.status_extended = f"API Server is configured with strong cryptographic ciphers in pod {pod.name}." + strong_ciphers_set = True for container in pod.containers.values(): # Check if strong ciphers are set in "--tls-cipher-suites" - if "--tls-cipher-suites" in container.command and all( - cipher in container.command - for cipher in [ - "TLS_AES_128_GCM_SHA256", - "TLS_AES_256_GCM_SHA384", - "TLS_CHACHA20_POLY1305_SHA256", - ] - ): - strong_ciphers_set = True - break + for command in container.command: + if command.startswith("--tls-cipher-suites"): + for cipher in command.split("=")[1].split(","): + if cipher not in [ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + ]: + strong_ciphers_set = False + break if not strong_ciphers_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = "API Server is not using only strong cryptographic ciphers in container {container.name}." + report.status_extended = f"API Server is not using only strong cryptographic ciphers in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_tls_config/apiserver_tls_config.py b/prowler/providers/kubernetes/services/apiserver/apiserver_tls_config/apiserver_tls_config.py index 85650bbccb..a67e114dce 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_tls_config/apiserver_tls_config.py +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_tls_config/apiserver_tls_config.py @@ -14,22 +14,22 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_id = pod.uid report.status = "PASS" report.status_extended = ( - "TLS certificate and key are set appropriately in the API server." + f"TLS certificate and key are set appropriately in pod {pod.name}." ) tls_config_set = False for container in pod.containers.values(): # Check if both "--tls-cert-file" and "--tls-private-key-file" are set - if ( - "--tls-cert-file" in container.command - and "--tls-private-key-file" in container.command - ): + if "--tls-cert-file" in str( + container.command + ) and "--tls-private-key-file" in str(container.command): tls_config_set = True break if not tls_config_set: - report.resource_id = container.name report.status = "FAIL" - report.status_extended = "TLS certificate and/or key are not set in container {container.name}." + report.status_extended = ( + f"TLS certificate and/or key are not set in pod {pod.name}."
+ ) findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.py index fa0996220b..1a20849198 100644 --- a/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.py +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.py @@ -13,13 +13,10 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "PASS" - report.status_extended = ( - "Controller Manager has an appropriate garbage collection threshold." - ) + report.status_extended = f"Controller Manager has an appropriate garbage collection threshold in pod {pod.name}." for container in pod.containers.values(): - if "--terminated-pod-gc-threshold=12500" in container.command: - report.resource_id = container.name + if "--terminated-pod-gc-threshold=12500" in str(container.command): report.status = "FAIL" - report.status_extended = "Controller Manager has the default garbage collection threshold." + report.status_extended = f"Controller Manager has the default garbage collection threshold in pod {pod.name}." 
findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.py b/prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.py index 28791848bd..07f04e34d5 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.py @@ -11,14 +11,17 @@ def execute(self) -> Check_Report_Kubernetes: report.resource_name = pod.name report.resource_id = pod.uid report.status = "FAIL" - report.status_extended = "Etcd does not have TLS encryption configured." + report.status_extended = ( + f"Etcd does not have TLS encryption configured in pod {pod.name}." + ) for container in pod.containers.values(): - if ( - "--cert-file" in container.command - and "--key-file" in container.command + if "--cert-file" in str(container.command) and "--key-file" in str( + container.command ): - report.resource_id = container.name + report.status = "PASS" - report.status_extended = "Etcd has configured TLS encryption." + report.status_extended = ( + f"Etcd has configured TLS encryption in pod {pod.name}." + ) findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/scheduler/scheduler_profiling/scheduler_profiling.py b/prowler/providers/kubernetes/services/scheduler/scheduler_profiling/scheduler_profiling.py index b591dc3af7..d62c56e3be 100644 --- a/prowler/providers/kubernetes/services/scheduler/scheduler_profiling/scheduler_profiling.py +++ b/prowler/providers/kubernetes/services/scheduler/scheduler_profiling/scheduler_profiling.py @@ -12,12 +12,15 @@ def execute(self) -> Check_Report_Kubernetes: report.namespace = pod.namespace report.resource_name = pod.name report.resource_id = pod.uid - report.status = "PASS" - report.status_extended = "Scheduler does not have profiling enabled." 
+ report.status = "FAIL" + report.status_extended = ( + f"Scheduler has profiling enabled in pod {pod.name}." + ) for container in pod.containers.values(): - if "--profiling=true" in container.command: - report.resource_id = container.name - report.status = "FAIL" - report.status_extended = f"Scheduler has profiling enabled in container {container.name}." + if "--profiling=false" in str(container.command): + report.status = "PASS" + report.status_extended = ( + f"Scheduler does not have profiling enabled in pod {pod.name}." + ) findings.append(report) return findings From f743551f82bb0ffdf6ca446cd3869d223910952a Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Wed, 10 Jan 2024 16:39:50 +0100 Subject: [PATCH 08/21] improve checks logic --- .../apiserver_auth_mode_include_node.metadata.json | 2 +- .../apiserver_auth_mode_include_rbac.metadata.json | 2 +- .../apiserver_event_rate_limit.metadata.json | 2 +- .../apiserver_security_context_deny_plugin.metadata.json | 2 +- .../apiserver_service_account_lookup_true.metadata.json | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.metadata.json index 1d4a7d244f..f9cccfc8d6 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_node/apiserver_auth_mode_include_node.metadata.json @@ -28,7 +28,7 @@ }, "Categories": [ "Access Control", - "Best Practices" + "Security Best Practices" ], "DependsOn": [], "RelatedTo": [], diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.metadata.json 
b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.metadata.json index c1214eea25..848a9f009c 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_auth_mode_include_rbac/apiserver_auth_mode_include_rbac.metadata.json @@ -28,7 +28,7 @@ }, "Categories": [ "Access Control", - "Best Practices" + "Security Best Practices" ], "DependsOn": [], "RelatedTo": [], diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.metadata.json index ee62ce0de1..ff560542bc 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_event_rate_limit/apiserver_event_rate_limit.metadata.json @@ -28,7 +28,7 @@ }, "Categories": [ "Resource Management", - "Best Practices" + "Security Best Practices" ], "DependsOn": [], "RelatedTo": [], diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_security_context_deny_plugin/apiserver_security_context_deny_plugin.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_security_context_deny_plugin/apiserver_security_context_deny_plugin.metadata.json index e3687334f8..c9b0061a0b 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_security_context_deny_plugin/apiserver_security_context_deny_plugin.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_security_context_deny_plugin/apiserver_security_context_deny_plugin.metadata.json @@ -28,7 +28,7 @@ }, "Categories": [ "Pod Security", - "Best Practices" + "Security Best Practices" ], "DependsOn": [], "RelatedTo": 
[], diff --git a/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_lookup_true/apiserver_service_account_lookup_true.metadata.json b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_lookup_true/apiserver_service_account_lookup_true.metadata.json index c023ae4f22..ffac85c329 100644 --- a/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_lookup_true/apiserver_service_account_lookup_true.metadata.json +++ b/prowler/providers/kubernetes/services/apiserver/apiserver_service_account_lookup_true/apiserver_service_account_lookup_true.metadata.json @@ -28,7 +28,7 @@ }, "Categories": [ "Access Control", - "Best Practices" + "Security Best Practices" ], "DependsOn": [], "RelatedTo": [], From ed394cab45851242efb35d5d57f8af2fe5bc2e43 Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Tue, 16 Jan 2024 17:51:30 +0100 Subject: [PATCH 09/21] feat(controllermanager): add checks for Kubernetes Controller Manager --- .../__init__.py | 0 ...ntrollermanager_bind_address.metadata.json | 36 +++++++++++++++++++ .../controllermanager_bind_address.py | 24 +++++++++++++ .../__init__.py | 0 ...lermanager_disable_profiling.metadata.json | 35 ++++++++++++++++++ .../controllermanager_disable_profiling.py | 24 +++++++++++++ ...ermanager_garbage_collection.metadata.json | 2 +- .../__init__.py | 0 ...llermanager_root_ca_file_set.metadata.json | 36 +++++++++++++++++++ .../controllermanager_root_ca_file_set.py | 24 +++++++++++++ .../__init__.py | 0 ...r_rotate_kubelet_server_cert.metadata.json | 36 +++++++++++++++++++ ...ollermanager_rotate_kubelet_server_cert.py | 26 ++++++++++++++ .../__init__.py | 0 ..._service_account_credentials.metadata.json | 36 +++++++++++++++++++ ...llermanager_service_account_credentials.py | 22 ++++++++++++ .../__init__.py | 0 ...ice_account_private_key_file.metadata.json | 36 +++++++++++++++++++ ...anager_service_account_private_key_file.py | 22 ++++++++++++ 19 files changed, 358 insertions(+), 1 
deletion(-) create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_bind_address/__init__.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_bind_address/controllermanager_bind_address.metadata.json create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_bind_address/controllermanager_bind_address.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_disable_profiling/__init__.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_disable_profiling/controllermanager_disable_profiling.metadata.json create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_disable_profiling/controllermanager_disable_profiling.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_root_ca_file_set/__init__.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_root_ca_file_set/controllermanager_root_ca_file_set.metadata.json create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_root_ca_file_set/controllermanager_root_ca_file_set.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_rotate_kubelet_server_cert/__init__.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_rotate_kubelet_server_cert/controllermanager_rotate_kubelet_server_cert.metadata.json create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_rotate_kubelet_server_cert/controllermanager_rotate_kubelet_server_cert.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_credentials/__init__.py create mode 100644 
prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_credentials/controllermanager_service_account_credentials.metadata.json create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_credentials/controllermanager_service_account_credentials.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_private_key_file/__init__.py create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_private_key_file/controllermanager_service_account_private_key_file.metadata.json create mode 100644 prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_private_key_file/controllermanager_service_account_private_key_file.py diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_bind_address/__init__.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_bind_address/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_bind_address/controllermanager_bind_address.metadata.json b/prowler/providers/kubernetes/services/controllermanager/controllermanager_bind_address/controllermanager_bind_address.metadata.json new file mode 100644 index 0000000000..f981886c5e --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_bind_address/controllermanager_bind_address.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "controllermanager_bind_address", + "CheckTitle": "Ensure that the --bind-address argument is set to 127.0.0.1", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "controller-manager", + "SubServiceName": "Bind Address Configuration", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": 
"KubernetesControllerManager", + "Description": "This check verifies that the Kubernetes Controller Manager is bound to the loopback address (127.0.0.1) to minimize the cluster's attack surface. Binding to the loopback address ensures that the Controller Manager API service is not exposed to unauthorized network access.", + "Risk": "Binding the Controller Manager to a non-loopback address exposes sensitive health and metrics information without authentication or encryption.", + "RelatedUrl": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/", + "Remediation": { + "Code": { + "CLI": "Edit the controller-manager manifest to set --bind-address to 127.0.0.1.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Bind the Controller Manager to the loopback address for enhanced security.", + "Url": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/" + } + }, + "Categories": [ + "Network Security", + "Configuration Management" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Check for the --address argument as well, as it might be used instead of --bind-address in certain Kubernetes versions." 
+} diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_bind_address/controllermanager_bind_address.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_bind_address/controllermanager_bind_address.py new file mode 100644 index 0000000000..77b9c4270d --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_bind_address/controllermanager_bind_address.py @@ -0,0 +1,24 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.controllermanager.controllermanager_client import ( + controllermanager_client, +) + + +class controllermanager_bind_address(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in controllermanager_client.controllermanager_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "FAIL" + report.status_extended = f"Controller Manager is not bound to the loopback address in pod {pod.name}." + for container in pod.containers.values(): + if "--bind-address=127.0.0.1" in str( + container.command + ) or "--address=127.0.0.1" in str(container.command): + report.status = "PASS" + report.status_extended = f"Controller Manager is bound to the loopback address in pod {pod.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_disable_profiling/__init__.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_disable_profiling/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_disable_profiling/controllermanager_disable_profiling.metadata.json b/prowler/providers/kubernetes/services/controllermanager/controllermanager_disable_profiling/controllermanager_disable_profiling.metadata.json new file mode 100644 index 0000000000..12a771152c --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_disable_profiling/controllermanager_disable_profiling.metadata.json @@ -0,0 +1,35 @@ +{ + "Provider": "kubernetes", + "CheckID": "controllermanager_disable_profiling", + "CheckTitle": "Ensure that the --profiling argument is set to false", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "controller-manager", + "SubServiceName": "Profiling Configuration", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesControllerManager", + "Description": "This check ensures that profiling is disabled in the Kubernetes Controller Manager, reducing the potential attack surface.", + "Risk": "Enabling profiling can expose detailed system and program information, which could be exploited if accessed by unauthorized users.", + "RelatedUrl": "https://github.com/kubernetes/community/blob/master/contributors/devel/profiling.md", + "Remediation": { + "Code": { + "CLI": "Edit the controller-manager manifest to set the --profiling argument to false.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Disable profiling in the Kubernetes Controller Manager for enhanced security.", + "Url": "https://kubernetes.io/docs/admin/kube-controller-manager/" + }
+ }, + "Categories": [ + "Security Best Practices" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Profiling should be turned off unless it is explicitly required for troubleshooting performance issues." +} diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_disable_profiling/controllermanager_disable_profiling.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_disable_profiling/controllermanager_disable_profiling.py new file mode 100644 index 0000000000..81457472a0 --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_disable_profiling/controllermanager_disable_profiling.py @@ -0,0 +1,24 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.controllermanager.controllermanager_client import ( + controllermanager_client, +) + + +class controllermanager_disable_profiling(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in controllermanager_client.controllermanager_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "FAIL" + report.status_extended = ( + f"Controller Manager has profiling enabled in pod {pod.name}." + ) + for container in pod.containers.values(): + if "--profiling=false" in str(container.command): + report.status = "PASS" + report.status_extended = f"Controller Manager does not have profiling enabled in pod {pod.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.metadata.json b/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.metadata.json index 059e54d5d2..678898200d 100644 --- a/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.metadata.json +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_garbage_collection/controllermanager_garbage_collection.metadata.json @@ -6,7 +6,7 @@ "Resource Management", "Performance Optimization" ], - "ServiceName": "kube-controller-manager", + "ServiceName": "controller-manager", "SubServiceName": "", "ResourceIdTemplate": "", "Severity": "medium", diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_root_ca_file_set/__init__.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_root_ca_file_set/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_root_ca_file_set/controllermanager_root_ca_file_set.metadata.json b/prowler/providers/kubernetes/services/controllermanager/controllermanager_root_ca_file_set/controllermanager_root_ca_file_set.metadata.json new file mode 100644 index 0000000000..1ffdefc99f --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_root_ca_file_set/controllermanager_root_ca_file_set.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "controllermanager_root_ca_file_set", + "CheckTitle": "Ensure that the --root-ca-file argument is set as appropriate", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "controller-manager", + "SubServiceName": "Root CA File 
Configuration", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesControllerManager", + "Description": "This check verifies that the Kubernetes Controller Manager is configured with the --root-ca-file argument set to a certificate bundle file, allowing pods to verify the API server's serving certificate.", + "Risk": "Not setting the root CA file can expose pods to man-in-the-middle attacks due to unverified TLS connections to the API server.", + "RelatedUrl": "https://github.com/kubernetes/kubernetes/issues/11000", + "Remediation": { + "Code": { + "CLI": "Edit the controller-manager manifest to set the --root-ca-file argument to the appropriate certificate bundle file.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Configure the Controller Manager with a root CA file to enhance security for pods communicating with the API server.", + "Url": "https://kubernetes.io/docs/admin/kube-controller-manager/" + } + }, + "Categories": [ + "Data Security", + "Network Security" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Ensure that the certificate bundle file is properly maintained and updated as needed." 
+} diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_root_ca_file_set/controllermanager_root_ca_file_set.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_root_ca_file_set/controllermanager_root_ca_file_set.py new file mode 100644 index 0000000000..15fddec850 --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_root_ca_file_set/controllermanager_root_ca_file_set.py @@ -0,0 +1,24 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.controllermanager.controllermanager_client import ( + controllermanager_client, +) + + +class controllermanager_root_ca_file_set(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in controllermanager_client.controllermanager_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "FAIL" + report.status_extended = ( + f"Controller Manager does not have the root CA file set in pod {pod.name}." + ) + for container in pod.containers.values(): + if "--root-ca-file=" in str(container.command): + report.status = "PASS" + report.status_extended = f"Controller Manager has the root CA file set in pod {pod.name}."
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_rotate_kubelet_server_cert/__init__.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_rotate_kubelet_server_cert/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_rotate_kubelet_server_cert/controllermanager_rotate_kubelet_server_cert.metadata.json b/prowler/providers/kubernetes/services/controllermanager/controllermanager_rotate_kubelet_server_cert/controllermanager_rotate_kubelet_server_cert.metadata.json new file mode 100644 index 0000000000..bf60d0d8c7 --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_rotate_kubelet_server_cert/controllermanager_rotate_kubelet_server_cert.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "controllermanager_rotate_kubelet_server_cert", + "CheckTitle": "Ensure that the RotateKubeletServerCertificate argument is set to true", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "controller-manager", + "SubServiceName": "Kubelet Server Certificate Rotation", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesControllerManager", + "Description": "This check ensures that the Kubernetes Controller Manager is configured with the RotateKubeletServerCertificate argument set to true, enabling automated rotation of kubelet server certificates.", + "Risk": "Not enabling kubelet server certificate rotation could lead to downtime due to expired certificates.", + "RelatedUrl": "https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#approval-controller", + "Remediation": { + "Code": { + "CLI": "Edit the controller-manager manifest to include RotateKubeletServerCertificate=true in the --feature-gates parameter.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + 
"Recommendation": { + "Text": "Enable kubelet server certificate rotation in the Controller Manager for automated certificate management.", + "Url": "https://kubernetes.io/docs/admin/kube-controller-manager/" + } + }, + "Categories": [ + "Data Security", + "Configuration Management" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Ensure that your cluster setup supports kubelet server certificate rotation." +} diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_rotate_kubelet_server_cert/controllermanager_rotate_kubelet_server_cert.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_rotate_kubelet_server_cert/controllermanager_rotate_kubelet_server_cert.py new file mode 100644 index 0000000000..96491cc000 --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_rotate_kubelet_server_cert/controllermanager_rotate_kubelet_server_cert.py @@ -0,0 +1,26 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.controllermanager.controllermanager_client import ( + controllermanager_client, +) + + +class controllermanager_rotate_kubelet_server_cert(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in controllermanager_client.controllermanager_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "FAIL" + report.status_extended = f"Controller Manager does not have RotateKubeletServerCertificate set to true in pod {pod.name}." + for container in pod.containers.values(): + for command in container.command: + if command.startswith("--feature-gates"): + if "RotateKubeletServerCertificate=true" in ( + command.split("=")[1] + ): + report.status = "PASS" + report.status_extended = f"Controller Manager has RotateKubeletServerCertificate set to true in pod {pod.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_credentials/__init__.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_credentials/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_credentials/controllermanager_service_account_credentials.metadata.json b/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_credentials/controllermanager_service_account_credentials.metadata.json new file mode 100644 index 0000000000..e5cb129205 --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_credentials/controllermanager_service_account_credentials.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "controllermanager_service_account_credentials", + "CheckTitle": "Ensure that the --use-service-account-credentials argument is set to true", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "controller-manager", + "SubServiceName": "Service Account Credentials", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesControllerManager", + "Description": "This check verifies that the Kubernetes Controller Manager is configured to use individual service account credentials for each controller, enhancing the security and role separation within the Kubernetes system.", + "Risk": "Not using individual service account credentials can lead to overly broad permissions and potential security risks.", + "RelatedUrl": "https://kubernetes.io/docs/admin/service-accounts-admin/", + "Remediation": { + "Code": { + "CLI": "Edit the controller-manager manifest to set the --use-service-account-credentials argument to true.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + 
"Recommendation": { + "Text": "Configure the Controller Manager to use individual service account credentials for enhanced security and role separation.", + "Url": "https://kubernetes.io/docs/admin/kube-controller-manager/" + } + }, + "Categories": [ + "Security Best Practices", + "Access Control" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Ensure that appropriate roles and permissions are set for each service account when enabling this feature." +} diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_credentials/controllermanager_service_account_credentials.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_credentials/controllermanager_service_account_credentials.py new file mode 100644 index 0000000000..cd6feea1f2 --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_credentials/controllermanager_service_account_credentials.py @@ -0,0 +1,22 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.controllermanager.controllermanager_client import ( + controllermanager_client, +) + + +class controllermanager_service_account_credentials(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in controllermanager_client.controllermanager_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "FAIL" + report.status_extended = f"Controller Manager is using service account credentials in pod {pod.name}." + for container in pod.containers.values(): + if "--use-service-account-credentials=true" in str(container.command): + report.status = "PASS" + report.status_extended = f"Controller Manager is not using service account credentials in pod {pod.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_private_key_file/__init__.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_private_key_file/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_private_key_file/controllermanager_service_account_private_key_file.metadata.json b/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_private_key_file/controllermanager_service_account_private_key_file.metadata.json new file mode 100644 index 0000000000..b7bbeb68d8 --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_private_key_file/controllermanager_service_account_private_key_file.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "controllermanager_service_account_private_key_file", + "CheckTitle": "Ensure that the --service-account-private-key-file argument is set as appropriate", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "controller-manager", + "SubServiceName": "Service Account Private Key File Configuration", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesControllerManager", + "Description": "This check ensures that the Kubernetes Controller Manager is configured with the --service-account-private-key-file argument set to the private key file for service accounts.", + "Risk": "Not setting a private key file for service accounts can hinder the ability to securely rotate service account tokens.", + "RelatedUrl": "https://kubernetes.io/docs/admin/kube-controller-manager/", + "Remediation": { + "Code": { + "CLI": "Edit the controller-manager manifest to set the --service-account-private-key-file argument to the appropriate private key 
file.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Configure the Controller Manager with a private key file for service accounts to maintain security and enable token rotation.", + "Url": "https://kubernetes.io/docs/admin/kube-controller-manager/" + } + }, + "Categories": [ + "Data Security", + "Configuration Management" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Ensure the private key file is securely maintained and periodically rotated as per the organization's policy." +} diff --git a/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_private_key_file/controllermanager_service_account_private_key_file.py b/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_private_key_file/controllermanager_service_account_private_key_file.py new file mode 100644 index 0000000000..9ac329b35f --- /dev/null +++ b/prowler/providers/kubernetes/services/controllermanager/controllermanager_service_account_private_key_file/controllermanager_service_account_private_key_file.py @@ -0,0 +1,22 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.controllermanager.controllermanager_client import ( + controllermanager_client, +) + + +class controllermanager_service_account_private_key_file(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in controllermanager_client.controllermanager_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "FAIL" + report.status_extended = f"Controller Manager has the service account private key file set in pod {pod.name}." 
+ for container in pod.containers.values(): + if "--service-account-private-key-file=" in str(container.command): + report.status = "PASS" + report.status_extended = f"Controller Manager does not have the service account private key file set in pod {pod.name}." + findings.append(report) + return findings From 0ce5cdc555141e10e6951734ebe96d4815e79c13 Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Wed, 17 Jan 2024 14:14:17 +0100 Subject: [PATCH 10/21] feat(etcd): add checks for Kubernetes etcd --- .../etcd/etcd_client_cert_auth/__init__.py | 0 .../etcd_client_cert_auth.metadata.json | 36 +++++++++++++++++++ .../etcd_client_cert_auth.py | 21 +++++++++++ .../etcd/etcd_no_auto_tls/__init__.py | 0 .../etcd_no_auto_tls.metadata.json | 36 +++++++++++++++++++ .../etcd/etcd_no_auto_tls/etcd_no_auto_tls.py | 22 ++++++++++++ .../etcd/etcd_no_peer_auto_tls/__init__.py | 0 .../etcd_no_peer_auto_tls.metadata.json | 36 +++++++++++++++++++ .../etcd_no_peer_auto_tls.py | 22 ++++++++++++ .../etcd_peer_client_cert_auth/__init__.py | 0 .../etcd_peer_client_cert_auth.metadata.json | 36 +++++++++++++++++++ .../etcd_peer_client_cert_auth.py | 21 +++++++++++ .../etcd/etcd_peer_tls_config/__init__.py | 0 .../etcd_peer_tls_config.metadata.json | 36 +++++++++++++++++++ .../etcd_peer_tls_config.py | 23 ++++++++++++ .../services/etcd/etcd_unique_ca/__init__.py | 0 .../etcd_unique_ca.metadata.json | 36 +++++++++++++++++++ .../etcd/etcd_unique_ca/etcd_unique_ca.py | 34 ++++++++++++++++++ .../scheduler_bind_address/__init__.py | 0 .../scheduler_bind_address.metadata.json | 36 +++++++++++++++++++ .../scheduler_bind_address.py | 26 ++++++++++++++ 21 files changed, 421 insertions(+) create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/__init__.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json create mode 100644 
prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/__init__.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/__init__.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/__init__.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/__init__.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_unique_ca/__init__.py create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json create mode 100644 prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.py create mode 100644 prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/__init__.py create mode 100644 prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/scheduler_bind_address.metadata.json create mode 100644 
prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/scheduler_bind_address.py diff --git a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/__init__.py b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json new file mode 100644 index 0000000000..533b95df03 --- /dev/null +++ b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "etcd_client_cert_auth", + "CheckTitle": "Ensure that the --client-cert-auth argument is set to true for etcd", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "etcd", + "SubServiceName": "Client Certificate Authentication", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "EtcdService", + "Description": "This check ensures that client authentication is enabled for the etcd service, which is a key-value store used by Kubernetes for persistent storage of all REST API objects. Enabling client authentication helps in securing access to etcd.", + "Risk": "If --client-cert-auth is not set to true, etcd service may be accessible by unauthenticated clients, posing a significant security risk.", + "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "Remediation": { + "Code": { + "CLI": "Edit the etcd pod specification file to set --client-cert-auth to true. 
Example: --client-cert-auth=\"true\".", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Enable client certificate authentication for the etcd service for improved security.", + "Url": "https://coreos.com/etcd/docs/latest/op-guide/security.html" + } + }, + "Categories": [ + "Data Security", + "Access Control" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Ensure that all clients communicating with etcd have valid certificates." +} diff --git a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py new file mode 100644 index 0000000000..f8796a858e --- /dev/null +++ b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py @@ -0,0 +1,21 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.etcd.etcd_client import etcd_client + + +class etcd_client_cert_auth(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in etcd_client.etcd_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "FAIL" + report.status_extended = f"Etcd does not have client certificate authentication enabled in pod {pod.name}." + for container in pod.containers.values(): + if "--client-cert-auth=true" in str(container.command): + + report.status = "PASS" + report.status_extended = f"Etcd has client certificate authentication enabled in pod {pod.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/__init__.py b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json new file mode 100644 index 0000000000..8982fb4f4d --- /dev/null +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "etcd_no_auto_tls", + "CheckTitle": "Ensure that the --auto-tls argument is not set to true for etcd", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "etcd", + "SubServiceName": "TLS Configuration", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "EtcdService", + "Description": "This check ensures that etcd does not use self-signed certificates for TLS, which are less secure than certificates from a trusted authority. Avoiding self-signed certificates enhances the security of etcd.", + "Risk": "Using --auto-tls=true may result in the use of self-signed certificates, reducing the overall security of the etcd service.", + "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "Remediation": { + "Code": { + "CLI": "Edit the etcd pod specification file to set --auto-tls to false or remove the parameter. 
Example: --auto-tls=false.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure etcd is not using self-signed certificates for TLS.", + "Url": "https://coreos.com/etcd/docs/latest/op-guide/security.html" + } + }, + "Categories": [ + "Data Security", + "Network Security" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Self-signed certificates should be replaced with certificates from a trusted certificate authority." +} diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py new file mode 100644 index 0000000000..dd6a819f4c --- /dev/null +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py @@ -0,0 +1,22 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.etcd.etcd_client import etcd_client + + +class etcd_no_auto_tls(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in etcd_client.etcd_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = f"Etcd is not configured to use self-signed certificates for TLS in pod {pod.name}." + for container in pod.containers.values(): + if "--auto-tls=" in str(container.command) and "--auto-tls=true" in str( + container.command + ): + report.status = "FAIL" + report.status_extended = f"Etcd is configured to use self-signed certificates for TLS in pod {pod.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/__init__.py b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json new file mode 100644 index 0000000000..cf5e618784 --- /dev/null +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "etcd_no_peer_auto_tls", + "CheckTitle": "Ensure that the --peer-auto-tls argument is not set to true for etcd", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "etcd", + "SubServiceName": "Peer TLS Configuration", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "EtcdService", + "Description": "This check ensures that etcd is not configured to use automatically generated self-signed certificates for TLS connections between peers. Using self-signed certificates for peer authentication is discouraged in a production environment.", + "Risk": "Using self-signed certificates can lead to insecure communications between etcd peers, compromising data security.", + "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "Remediation": { + "Code": { + "CLI": "Configure etcd to avoid using self-signed certificates for peer connections by editing the etcd pod specification file with the --peer-auto-tls parameter set to false. 
Example: --peer-auto-tls=false.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure etcd is not using automatically generated self-signed certificates for peer TLS connections.", + "Url": "https://coreos.com/etcd/docs/latest/op-guide/security.html" + } + }, + "Categories": [ + "Data Security", + "Network Security" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "This check is applicable only for etcd clusters. For single etcd server setups, this recommendation does not apply." +} diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py new file mode 100644 index 0000000000..fe34e6c8de --- /dev/null +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py @@ -0,0 +1,22 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.etcd.etcd_client import etcd_client + + +class etcd_no_peer_auto_tls(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in etcd_client.etcd_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "PASS" + report.status_extended = f"Etcd is not using automatically generated self-signed certificates for peer TLS connections in pod {pod.name}." + for container in pod.containers.values(): + if "--peer-auto-tls=" in str( + container.command + ) and "--peer-auto-tls=true" in str(container.command): + report.status = "FAIL" + report.status_extended = f"Etcd is using automatically generated self-signed certificates for TLS connections in pod {pod.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/__init__.py b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json new file mode 100644 index 0000000000..ec9584f584 --- /dev/null +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "etcd_peer_client_cert_auth", + "CheckTitle": "Ensure that the --peer-client-cert-auth argument is set to true for etcd", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "etcd", + "SubServiceName": "Peer Client Certificate Authentication", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "EtcdService", + "Description": "This check ensures that etcd is configured for peer authentication by verifying that the --peer-client-cert-auth argument is set to true. This configuration is crucial to ensure that etcd peers in the cluster are authenticated and secure.", + "Risk": "Failing to configure peer client authentication can lead to unauthorized access to the etcd cluster, compromising sensitive data.", + "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "Remediation": { + "Code": { + "CLI": "Configure peer client certificate authentication by editing the etcd pod specification file with the --peer-client-cert-auth parameter set to true. 
Example: --peer-client-cert-auth=true.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure etcd is configured for peer client certificate authentication.", + "Url": "https://coreos.com/etcd/docs/latest/op-guide/security.html" + } + }, + "Categories": [ + "Data Security", + "Network Security" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "This check is applicable only for etcd clusters. For single etcd server setups, this recommendation does not apply." +} diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py new file mode 100644 index 0000000000..0c6f7bc198 --- /dev/null +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py @@ -0,0 +1,21 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.etcd.etcd_client import etcd_client + + +class etcd_peer_client_cert_auth(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in etcd_client.etcd_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "FAIL" + report.status_extended = f"Etcd does not have peer client certificate authentication configured in pod {pod.name}." + for container in pod.containers.values(): + if "--peer-client-cert-auth=true" in str(container.command): + + report.status = "PASS" + report.status_extended = f"Etcd is configured for peer client certificate authentication in pod {pod.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/__init__.py b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json new file mode 100644 index 0000000000..da2c4d094b --- /dev/null +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "etcd_peer_tls_config", + "CheckTitle": "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate for etcd", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "etcd", + "SubServiceName": "Peer TLS Configuration", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "EtcdService", + "Description": "This check ensures that etcd is configured to use TLS encryption for peer connections, which is crucial for securing sensitive data stored in etcd. It verifies the presence of peer certificate and key file arguments in etcd configuration.", + "Risk": "Not configuring TLS for peer connections in etcd can lead to potential data breaches and unauthorized access.", + "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "Remediation": { + "Code": { + "CLI": "Configure peer TLS encryption by editing the etcd pod specification file with appropriate certificate and key files. 
Example: --peer-cert-file= --peer-key-file=.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Secure etcd peer connections with TLS encryption.", + "Url": "https://coreos.com/etcd/docs/latest/op-guide/security.html" + } + }, + "Categories": [ + "Data Security", + "Network Security" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "This check is only applicable for etcd clusters. For single etcd server setups, this recommendation does not apply." +} diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.py b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.py new file mode 100644 index 0000000000..c4c3db75fb --- /dev/null +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.py @@ -0,0 +1,23 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.etcd.etcd_client import etcd_client + + +class etcd_peer_tls_config(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in etcd_client.etcd_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "FAIL" + report.status_extended = f"Etcd does not have TLS configured for peer connections in pod {pod.name}." + for container in pod.containers.values(): + if "--peer-cert-file" in str( + container.command + ) and "--peer-key-file" in str(container.command): + + report.status = "PASS" + report.status_extended = f"Etcd is configured with TLS for peer connections in pod {pod.name}." 
+ findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/__init__.py b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json new file mode 100644 index 0000000000..26a1aab1e3 --- /dev/null +++ b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "etcd_unique_ca", + "CheckTitle": "Ensure that a unique Certificate Authority is used for etcd", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "etcd", + "SubServiceName": "Certificate Authority Configuration", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "EtcdService", + "Description": "This check ensures that etcd uses a unique Certificate Authority (CA) separate from the one used for the overall Kubernetes cluster. This practice enhances the security by restricting access to the etcd database only to clients and peers with certificates issued by the dedicated etcd CA.", + "Risk": "Using the same CA for etcd and the Kubernetes cluster can expose etcd to unauthorized access if any certificate issued by the Kubernetes CA is compromised.", + "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "Remediation": { + "Code": { + "CLI": "Configure etcd to use a unique CA by setting the --trusted-ca-file parameter in the etcd pod specification to point to the dedicated etcd CA file. 
Example: --trusted-ca-file=.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure etcd uses a unique CA separate from the Kubernetes cluster CA.", + "Url": "https://coreos.com/etcd/docs/latest/op-guide/security.html" + } + }, + "Categories": [ + "Data Security", + "Configuration Management" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "This check is particularly important in environments where strict access control to the etcd database is required." +} diff --git a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.py b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.py new file mode 100644 index 0000000000..b458fd248d --- /dev/null +++ b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.py @@ -0,0 +1,34 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.apiserver.apiserver_client import ( + apiserver_client, +) +from prowler.providers.kubernetes.services.etcd.etcd_client import etcd_client + + +class etcd_unique_ca(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + # Get first the CA Files of the apiserver pods + apiserver_ca_files = [] + for pod in apiserver_client.apiserver_pods: + for container in pod.containers.values(): + for command in container.command: + if command.startswith("--client-ca-file"): + apiserver_ca_files.append(command.split("=")[1]) + for pod in etcd_client.etcd_pods: + etcd_ca_file = "" + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "FAIL" + report.status_extended = f"Etcd does not use a unique CA, which could compromise its security in pod {pod.name}." 
+ for container in pod.containers.values(): + for command in container.command: + if command.startswith("--trusted-ca-file"): + etcd_ca_file = command.split("=")[1] + if etcd_ca_file not in apiserver_ca_files: + report.status = "PASS" + report.status_extended = f"Etcd uses a unique CA separate from the Kubernetes cluster CA in pod {pod.name}." + findings.append(report) + return findings diff --git a/prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/__init__.py b/prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/scheduler_bind_address.metadata.json b/prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/scheduler_bind_address.metadata.json new file mode 100644 index 0000000000..2ae81fdb1b --- /dev/null +++ b/prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/scheduler_bind_address.metadata.json @@ -0,0 +1,36 @@ +{ + "Provider": "kubernetes", + "CheckID": "scheduler_bind_address", + "CheckTitle": "Ensure that the --bind-address argument is set to 127.0.0.1 for the Scheduler", + "CheckType": [ + "Security", + "Configuration" + ], + "ServiceName": "scheduler", + "SubServiceName": "Bind Address Configuration", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "KubernetesScheduler", + "Description": "This check ensures that the Kubernetes Scheduler is bound to the loopback address (127.0.0.1) to minimize the cluster's attack surface. 
Binding to the loopback address prevents unauthorized network access to the Scheduler's health and metrics information.", + "Risk": "Binding the Scheduler to a non-loopback address exposes sensitive health and metrics information without authentication or encryption.", + "RelatedUrl": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/", + "Remediation": { + "Code": { + "CLI": "Edit the kube-scheduler configuration to set --bind-address to 127.0.0.1.", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "Bind the Scheduler to the loopback address for enhanced security.", + "Url": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/" + } + }, + "Categories": [ + "Network Security", + "Configuration Management" + ], + "DependsOn": [], + "RelatedTo": [], + "Notes": "Ensure compatibility with the Kubernetes version in use, as command-line flags may differ." +} diff --git a/prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/scheduler_bind_address.py b/prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/scheduler_bind_address.py new file mode 100644 index 0000000000..342a562782 --- /dev/null +++ b/prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/scheduler_bind_address.py @@ -0,0 +1,26 @@ +from prowler.lib.check.models import Check, Check_Report_Kubernetes +from prowler.providers.kubernetes.services.scheduler.scheduler_client import ( + scheduler_client, +) + + +class scheduler_bind_address(Check): + def execute(self) -> Check_Report_Kubernetes: + findings = [] + for pod in scheduler_client.scheduler_pods: + report = Check_Report_Kubernetes(self.metadata()) + report.namespace = pod.namespace + report.resource_name = pod.name + report.resource_id = pod.uid + report.status = "FAIL" + report.status_extended = ( + f"Scheduler is not bound to the loopback address in pod {pod.name}." 
+ ) + for container in pod.containers.values(): + if "--bind-address=127.0.0.1" in str(container.command): + report.status = "PASS" + report.status_extended = ( + f"Scheduler is bound to the loopback address in pod {pod.name}." + ) + findings.append(report) + return findings From 00e22737133cc38b8a42e90222e17c815e0623fb Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Thu, 22 Feb 2024 15:11:35 +0000 Subject: [PATCH 11/21] resolve comments --- .../etcd_client_cert_auth.metadata.json | 4 ++-- .../etcd_client_cert_auth.py | 14 ++++++++------ .../etcd_no_auto_tls.metadata.json | 4 ++-- .../etcd/etcd_no_auto_tls/etcd_no_auto_tls.py | 1 + .../etcd_no_peer_auto_tls.metadata.json | 4 ++-- .../etcd_no_peer_auto_tls.py | 1 + .../etcd_peer_client_cert_auth.metadata.json | 4 ++-- .../etcd_peer_client_cert_auth.py | 12 ++++++------ .../etcd_peer_tls_config.metadata.json | 4 ++-- .../etcd_peer_tls_config/etcd_peer_tls_config.py | 16 +++++++++------- .../etcd_unique_ca/etcd_unique_ca.metadata.json | 4 ++-- .../scheduler_bind_address.py | 13 ++++++------- 12 files changed, 43 insertions(+), 38 deletions(-) diff --git a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json index 533b95df03..1a7de863ac 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that client authentication is enabled for the etcd service, which is a key-value store used by Kubernetes for persistent storage of all REST API objects. 
Enabling client authentication helps in securing access to etcd.", "Risk": "If --client-cert-auth is not set to true, etcd service may be accessible by unauthenticated clients, posing a significant security risk.", - "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "RelatedUrl": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/", "Remediation": { "Code": { "CLI": "Edit the etcd pod specification file to set --client-cert-auth to true. Example: --client-cert-auth=\"true\".", @@ -23,7 +23,7 @@ }, "Recommendation": { "Text": "Enable client certificate authentication for the etcd service for improved security.", - "Url": "https://coreos.com/etcd/docs/latest/op-guide/security.html" + "Url": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#limiting-access-of-etcd-clusters" } }, "Categories": [ diff --git a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py index f8796a858e..152d05c51c 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py @@ -10,12 +10,14 @@ def execute(self) -> Check_Report_Kubernetes: report.namespace = pod.namespace report.resource_name = pod.name report.resource_id = pod.uid - report.status = "FAIL" - report.status_extended = f"Etcd does not have client certificate authentication enabled in pod {pod.name}." + report.status = "PASS" + report.status_extended = ( + f"Etcd has client certificate authentication enabled in pod {pod.name}." + ) for container in pod.containers.values(): - if "--client-cert-auth=true" in str(container.command): - - report.status = "PASS" - report.status_extended = f"Etcd has client certificate authentication enabled in pod {pod.name}." 
+ if "--client-cert-auth=true" not in str(container.command): + report.status = "FAIL" + report.status_extended = f"Etcd does not have client certificate authentication enabled in pod {pod.name}." + break findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json index 8982fb4f4d..ab321876f5 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd does not use self-signed certificates for TLS, which are less secure than certificates from a trusted authority. Avoiding self-signed certificates enhances the security of etcd.", "Risk": "Using --auto-tls=true may result in the use of self-signed certificates, reducing the overall security of the etcd service.", - "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "RelatedUrl": "https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/", "Remediation": { "Code": { "CLI": "Edit the etcd pod specification file to set --auto-tls to false or remove the parameter. 
Example: --auto-tls=false.", @@ -23,7 +23,7 @@ }, "Recommendation": { "Text": "Ensure etcd is not using self-signed certificates for TLS.", - "Url": "https://coreos.com/etcd/docs/latest/op-guide/security.html" + "Url": "https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/" } }, "Categories": [ diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py index dd6a819f4c..fd6b1d2f4a 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py @@ -18,5 +18,6 @@ def execute(self) -> Check_Report_Kubernetes: ): report.status = "FAIL" report.status_extended = f"Etcd is configured to use self-signed certificates for TLS in pod {pod.name}." + break findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json index cf5e618784..211170305c 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd is not configured to use automatically generated self-signed certificates for TLS connections between peers. 
Using self-signed certificates for peer authentication is discouraged in a production environment.", "Risk": "Using self-signed certificates can lead to insecure communications between etcd peers, compromising data security.", - "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "RelatedUrl": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/", "Remediation": { "Code": { "CLI": "Configure etcd to avoid using self-signed certificates for peer connections by editing the etcd pod specification file with the --peer-auto-tls parameter set to false. Example: --peer-auto-tls=false.", @@ -23,7 +23,7 @@ }, "Recommendation": { "Text": "Ensure etcd is not using automatically generated self-signed certificates for peer TLS connections.", - "Url": "https://coreos.com/etcd/docs/latest/op-guide/security.html" + "Url": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/" } }, "Categories": [ diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py index fe34e6c8de..c13eb32b44 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py @@ -18,5 +18,6 @@ def execute(self) -> Check_Report_Kubernetes: ) and "--peer-auto-tls=true" in str(container.command): report.status = "FAIL" report.status_extended = f"Etcd is using automatically generated self-signed certificates for TLS connections in pod {pod.name}." 
+ break findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json index ec9584f584..c6ad90df7b 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd is configured for peer authentication by verifying that the --peer-client-cert-auth argument is set to true. This configuration is crucial to ensure that etcd peers in the cluster are authenticated and secure.", "Risk": "Failing to configure peer client authentication can lead to unauthorized access to the etcd cluster, compromising sensitive data.", - "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "RelatedUrl": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/", "Remediation": { "Code": { "CLI": "Configure peer client certificate authentication by editing the etcd pod specification file with the --peer-client-cert-auth parameter set to true. 
Example: --peer-client-cert-auth=true.", @@ -23,7 +23,7 @@ }, "Recommendation": { "Text": "Ensure etcd is configured for peer client certificate authentication.", - "Url": "https://coreos.com/etcd/docs/latest/op-guide/security.html" + "Url": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#limiting-access-of-etcd-clusters" } }, "Categories": [ diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py index 0c6f7bc198..f844efffe1 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py @@ -10,12 +10,12 @@ def execute(self) -> Check_Report_Kubernetes: report.namespace = pod.namespace report.resource_name = pod.name report.resource_id = pod.uid - report.status = "FAIL" - report.status_extended = f"Etcd does not have peer client certificate authentication configured in pod {pod.name}." + report.status = "PASS" + report.status_extended = f"Etcd is configured for peer client certificate authentication in pod {pod.name}." for container in pod.containers.values(): - if "--peer-client-cert-auth=true" in str(container.command): - - report.status = "PASS" - report.status_extended = f"Etcd is configured for peer client certificate authentication in pod {pod.name}." + if "--peer-client-cert-auth=true" not in str(container.command): + report.status = "FAIL" + report.status_extended = f"Etcd does not have peer client certificate authentication configured in pod {pod.name}." 
+ break findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json index da2c4d094b..06cc4c86a8 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd is configured to use TLS encryption for peer connections, which is crucial for securing sensitive data stored in etcd. It verifies the presence of peer certificate and key file arguments in etcd configuration.", "Risk": "Not configuring TLS for peer connections in etcd can lead to potential data breaches and unauthorized access.", - "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "RelatedUrl": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd", "Remediation": { "Code": { "CLI": "Configure peer TLS encryption by editing the etcd pod specification file with appropriate certificate and key files. 
Example: --peer-client-file= --peer-key-file=.", @@ -23,7 +23,7 @@ }, "Recommendation": { "Text": "Secure etcd peer connections with TLS encryption.", - "Url": "https://coreos.com/etcd/docs/latest/op-guide/security.html" + "Url": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#securing-communication" } }, "Categories": [ diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.py b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.py index c4c3db75fb..d60476192b 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.py @@ -10,14 +10,16 @@ def execute(self) -> Check_Report_Kubernetes: report.namespace = pod.namespace report.resource_name = pod.name report.resource_id = pod.uid - report.status = "FAIL" - report.status_extended = f"Etcd does not have TLS configured for peer connections in pod {pod.name}." + report.status = "PASS" + report.status_extended = ( + f"Etcd is configured with TLS for peer connections in pod {pod.name}." + ) for container in pod.containers.values(): - if "--peer-cert-file" in str( + if "--peer-cert-file" not in str( container.command - ) and "--peer-key-file" in str(container.command): - - report.status = "PASS" - report.status_extended = f"Etcd is configured with TLS for peer connections in pod {pod.name}." + ) or "--peer-key-file" not in str(container.command): + report.status = "FAIL" + report.status_extended = f"Etcd does not have TLS configured for peer connections in pod {pod.name}." 
+ break findings.append(report) return findings diff --git a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json index 26a1aab1e3..881cff344a 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd uses a unique Certificate Authority (CA) separate from the one used for the overall Kubernetes cluster. This practice enhances the security by restricting access to the etcd database only to clients and peers with certificates issued by the dedicated etcd CA.", "Risk": "Using the same CA for etcd and the Kubernetes cluster can expose etcd to unauthorized access if any certificate issued by the Kubernetes CA is compromised.", - "RelatedUrl": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "RelatedUrl": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/", "Remediation": { "Code": { "CLI": "Configure etcd to use a unique CA by setting the --trusted-ca-file parameter in the etcd pod specification to point to the dedicated etcd CA file. 
Example: --trusted-ca-file=.", @@ -23,7 +23,7 @@ }, "Recommendation": { "Text": "Ensure etcd uses a unique CA separate from the Kubernetes cluster CA.", - "Url": "https://coreos.com/etcd/docs/latest/op-guide/security.html" + "Url": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#limiting-access-of-etcd-clusters" } }, "Categories": [ diff --git a/prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/scheduler_bind_address.py b/prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/scheduler_bind_address.py index 342a562782..43309ba0f4 100644 --- a/prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/scheduler_bind_address.py +++ b/prowler/providers/kubernetes/services/scheduler/scheduler_bind_address/scheduler_bind_address.py @@ -12,15 +12,14 @@ def execute(self) -> Check_Report_Kubernetes: report.namespace = pod.namespace report.resource_name = pod.name report.resource_id = pod.uid - report.status = "FAIL" + report.status = "PASS" report.status_extended = ( - f"Scheduler is not bound to the loopback address in pod {pod.name}." + f"Scheduler is bound to the loopback address in pod {pod.name}." ) for container in pod.containers.values(): - if "--bind-address=127.0.0.1" in str(container.command): - report.status = "PASS" - report.status_extended = ( - f"Scheduler is bound to the loopback address in pod {pod.name}." - ) + if "--bind-address=127.0.0.1" not in str(container.command): + report.status = "FAIL" + report.status_extended = f"Scheduler is not bound to the loopback address in pod {pod.name}." 
+ break findings.append(report) return findings From 975678918a73f4eef8393c32ce2055c2903ecec8 Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Thu, 22 Feb 2024 15:22:25 +0000 Subject: [PATCH 12/21] fix etcd_unique_ca check --- .../services/etcd/etcd_unique_ca/etcd_unique_ca.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.py b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.py index b458fd248d..7cf57c1f7b 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.py @@ -16,19 +16,19 @@ def execute(self) -> Check_Report_Kubernetes: if command.startswith("--client-ca-file"): apiserver_ca_files.append(command.split("=")[1]) for pod in etcd_client.etcd_pods: - etcd_ca_file = "" + etcd_ca_files = [] report = Check_Report_Kubernetes(self.metadata()) report.namespace = pod.namespace report.resource_name = pod.name report.resource_id = pod.uid - report.status = "FAIL" - report.status_extended = f"Etcd does not use a unique CA, which could compromise its security in pod {pod.name}." + report.status = "PASS" + report.status_extended = f"Etcd uses a unique CA separate from the Kubernetes cluster CA in pod {pod.name}." for container in pod.containers.values(): for command in container.command: if command.startswith("--trusted-ca-file"): - etcd_ca_file = command.split("=")[1] - if etcd_ca_file not in apiserver_ca_files: - report.status = "PASS" - report.status_extended = f"Etcd uses a unique CA separate from the Kubernetes cluster CA in pod {pod.name}." + etcd_ca_files.append(command.split("=")[1]) + if any(ca in etcd_ca_files for ca in apiserver_ca_files): + report.status = "FAIL" + report.status_extended = f"Etcd does not use a unique CA, which could compromise its security in pod {pod.name}." 
findings.append(report) return findings From 28e3dc3893c981bba7be3659671bb03d28ad113d Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Thu, 22 Feb 2024 16:03:10 +0000 Subject: [PATCH 13/21] improve etcd_unique_ca --- .../services/etcd/etcd_unique_ca/etcd_unique_ca.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.py b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.py index 7cf57c1f7b..00ba015a96 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.py @@ -21,14 +21,14 @@ def execute(self) -> Check_Report_Kubernetes: report.namespace = pod.namespace report.resource_name = pod.name report.resource_id = pod.uid - report.status = "PASS" - report.status_extended = f"Etcd uses a unique CA separate from the Kubernetes cluster CA in pod {pod.name}." + report.status = "MANUAL" + report.status_extended = f"Etcd uses a different CA file from the Kubernetes cluster CA in pod {pod.name}, but verify if the content is the same." for container in pod.containers.values(): for command in container.command: if command.startswith("--trusted-ca-file"): etcd_ca_files.append(command.split("=")[1]) if any(ca in etcd_ca_files for ca in apiserver_ca_files): report.status = "FAIL" - report.status_extended = f"Etcd does not use a unique CA, which could compromise its security in pod {pod.name}." + report.status_extended = f"Etcd does not use a unique CA file, which could compromise its security in pod {pod.name}." 
findings.append(report) return findings From 1639b166bc47d471a44f3b4a57ccec56b84a010d Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Thu, 22 Feb 2024 16:11:07 +0000 Subject: [PATCH 14/21] solve comments --- .../etcd_client_cert_auth/etcd_client_cert_auth.metadata.json | 2 +- .../etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json | 2 +- .../services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py | 4 +--- .../etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json | 2 +- .../etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py | 4 +--- .../etcd_peer_client_cert_auth.metadata.json | 2 +- .../etcd_peer_tls_config/etcd_peer_tls_config.metadata.json | 2 +- .../etcd_tls_encryption/etcd_tls_encryption.metadata.json | 2 +- .../services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json | 2 +- 9 files changed, 9 insertions(+), 13 deletions(-) diff --git a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json index 1a7de863ac..8fe58c1180 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that client authentication is enabled for the etcd service, which is a key-value store used by Kubernetes for persistent storage of all REST API objects. 
Enabling client authentication helps in securing access to etcd.", "Risk": "If --client-cert-auth is not set to true, etcd service may be accessible by unauthenticated clients, posing a significant security risk.", - "RelatedUrl": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/", + "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", "Remediation": { "Code": { "CLI": "Edit the etcd pod specification file to set --client-cert-auth to true. Example: --client-cert-auth=\"true\".", diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json index ab321876f5..0ee3b4b131 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd does not use self-signed certificates for TLS, which are less secure than certificates from a trusted authority. Avoiding self-signed certificates enhances the security of etcd.", "Risk": "Using --auto-tls=true may result in the use of self-signed certificates, reducing the overall security of the etcd service.", - "RelatedUrl": "https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/", + "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", "Remediation": { "Code": { "CLI": "Edit the etcd pod specification file to set --auto-tls to false or remove the parameter. 
Example: --auto-tls=false.", diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py index fd6b1d2f4a..6a99802fba 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py @@ -13,9 +13,7 @@ def execute(self) -> Check_Report_Kubernetes: report.status = "PASS" report.status_extended = f"Etcd is not configured to use self-signed certificates for TLS in pod {pod.name}." for container in pod.containers.values(): - if "--auto-tls=" in str(container.command) and "--auto-tls=true" in str( - container.command - ): + if "--auto-tls=true" in str(container.command): report.status = "FAIL" report.status_extended = f"Etcd is configured to use self-signed certificates for TLS in pod {pod.name}." break diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json index 211170305c..59bf2f6600 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd is not configured to use automatically generated self-signed certificates for TLS connections between peers. 
Using self-signed certificates for peer authentication is discouraged in a production environment.", "Risk": "Using self-signed certificates can lead to insecure communications between etcd peers, compromising data security.", - "RelatedUrl": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/", + "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", "Remediation": { "Code": { "CLI": "Configure etcd to avoid using self-signed certificates for peer connections by editing the etcd pod specification file with the --peer-auto-tls parameter set to false. Example: --peer-auto-tls=false.", diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py index c13eb32b44..1340c93aee 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py @@ -13,9 +13,7 @@ def execute(self) -> Check_Report_Kubernetes: report.status = "PASS" report.status_extended = f"Etcd is not using automatically generated self-signed certificates for peer TLS connections in pod {pod.name}." for container in pod.containers.values(): - if "--peer-auto-tls=" in str( - container.command - ) and "--peer-auto-tls=true" in str(container.command): + if "--peer-auto-tls=true" in str(container.command): report.status = "FAIL" report.status_extended = f"Etcd is using automatically generated self-signed certificates for TLS connections in pod {pod.name}." 
break diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json index c6ad90df7b..b300366d5f 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd is configured for peer authentication by verifying that the --peer-client-cert-auth argument is set to true. This configuration is crucial to ensure that etcd peers in the cluster are authenticated and secure.", "Risk": "Failing to configure peer client authentication can lead to unauthorized access to the etcd cluster, compromising sensitive data.", - "RelatedUrl": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/", + "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", "Remediation": { "Code": { "CLI": "Configure peer client certificate authentication by editing the etcd pod specification file with the --peer-client-cert-auth parameter set to true. 
Example: --peer-client-cert-auth=true.", diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json index 06cc4c86a8..03a737a60a 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd is configured to use TLS encryption for peer connections, which is crucial for securing sensitive data stored in etcd. It verifies the presence of peer certificate and key file arguments in etcd configuration.", "Risk": "Not configuring TLS for peer connections in etcd can lead to potential data breaches and unauthorized access.", - "RelatedUrl": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd", + "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", "Remediation": { "Code": { "CLI": "Configure peer TLS encryption by editing the etcd pod specification file with appropriate certificate and key files. Example: --peer-client-file= --peer-key-file=.", diff --git a/prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.metadata.json index a42e166634..dcf46f8ba6 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "Etcd", "Description": "This check verifies that the etcd service in a Kubernetes cluster is configured with appropriate TLS encryption settings. 
etcd, being a key value store for all Kubernetes REST API objects, should have its communication encrypted to protect these sensitive objects in transit.", "Risk": "Without proper TLS configuration, data stored in etcd can be susceptible to interception and unauthorized access, posing a significant security risk to the entire Kubernetes cluster.", - "RelatedUrl": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#limiting-access-of-etcd-clusters", + "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", "Remediation": { "Code": { "CLI": "Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master node and set the --cert-file and --key-file arguments appropriately.", diff --git a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json index 881cff344a..be80f466e6 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd uses a unique Certificate Authority (CA) separate from the one used for the overall Kubernetes cluster. 
This practice enhances the security by restricting access to the etcd database only to clients and peers with certificates issued by the dedicated etcd CA.", "Risk": "Using the same CA for etcd and the Kubernetes cluster can expose etcd to unauthorized access if any certificate issued by the Kubernetes CA is compromised.", - "RelatedUrl": "https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/", + "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", "Remediation": { "Code": { "CLI": "Configure etcd to use a unique CA by setting the --trusted-ca-file parameter in the etcd pod specification to point to the dedicated etcd CA file. Example: --trusted-ca-file=.", From 45570a636225a1c1729a6fb0e59201590ae28f4a Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Thu, 22 Feb 2024 16:13:12 +0000 Subject: [PATCH 15/21] use latest etcd security link --- .../etcd_client_cert_auth/etcd_client_cert_auth.metadata.json | 2 +- .../etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json | 2 +- .../etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json | 2 +- .../etcd_peer_client_cert_auth.metadata.json | 2 +- .../etcd_peer_tls_config/etcd_peer_tls_config.metadata.json | 2 +- .../etcd/etcd_tls_encryption/etcd_tls_encryption.metadata.json | 2 +- .../services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json index 8fe58c1180..ec1897096d 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that client authentication is enabled for the etcd 
service, which is a key-value store used by Kubernetes for persistent storage of all REST API objects. Enabling client authentication helps in securing access to etcd.", "Risk": "If --client-cert-auth is not set to true, etcd service may be accessible by unauthenticated clients, posing a significant security risk.", - "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", + "RelatedUrl": "https://etcd.io/docs/latest/op-guide/security/", "Remediation": { "Code": { "CLI": "Edit the etcd pod specification file to set --client-cert-auth to true. Example: --client-cert-auth=\"true\".", diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json index 0ee3b4b131..2e49a94b42 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd does not use self-signed certificates for TLS, which are less secure than certificates from a trusted authority. Avoiding self-signed certificates enhances the security of etcd.", "Risk": "Using --auto-tls=true may result in the use of self-signed certificates, reducing the overall security of the etcd service.", - "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", + "RelatedUrl": "https://etcd.io/docs/latest/op-guide/security/", "Remediation": { "Code": { "CLI": "Edit the etcd pod specification file to set --auto-tls to false or remove the parameter. 
Example: --auto-tls=false.", diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json index 59bf2f6600..64ada44eef 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd is not configured to use automatically generated self-signed certificates for TLS connections between peers. Using self-signed certificates for peer authentication is discouraged in a production environment.", "Risk": "Using self-signed certificates can lead to insecure communications between etcd peers, compromising data security.", - "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", + "RelatedUrl": "https://etcd.io/docs/latest/op-guide/security/", "Remediation": { "Code": { "CLI": "Configure etcd to avoid using self-signed certificates for peer connections by editing the etcd pod specification file with the --peer-auto-tls parameter set to false. 
Example: --peer-auto-tls=false.", diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json index b300366d5f..fdd689bc38 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd is configured for peer authentication by verifying that the --peer-client-cert-auth argument is set to true. This configuration is crucial to ensure that etcd peers in the cluster are authenticated and secure.", "Risk": "Failing to configure peer client authentication can lead to unauthorized access to the etcd cluster, compromising sensitive data.", - "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", + "RelatedUrl": "https://etcd.io/docs/latest/op-guide/security/", "Remediation": { "Code": { "CLI": "Configure peer client certificate authentication by editing the etcd pod specification file with the --peer-client-cert-auth parameter set to true. 
Example: --peer-client-cert-auth=true.", diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json index 03a737a60a..7d66a369fc 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd is configured to use TLS encryption for peer connections, which is crucial for securing sensitive data stored in etcd. It verifies the presence of peer certificate and key file arguments in etcd configuration.", "Risk": "Not configuring TLS for peer connections in etcd can lead to potential data breaches and unauthorized access.", - "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", + "RelatedUrl": "https://etcd.io/docs/latest/op-guide/security/", "Remediation": { "Code": { "CLI": "Configure peer TLS encryption by editing the etcd pod specification file with appropriate certificate and key files. Example: --peer-client-file= --peer-key-file=.", diff --git a/prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.metadata.json index dcf46f8ba6..b272cd5e72 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "Etcd", "Description": "This check verifies that the etcd service in a Kubernetes cluster is configured with appropriate TLS encryption settings. 
etcd, being a key value store for all Kubernetes REST API objects, should have its communication encrypted to protect these sensitive objects in transit.", "Risk": "Without proper TLS configuration, data stored in etcd can be susceptible to interception and unauthorized access, posing a significant security risk to the entire Kubernetes cluster.", - "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", + "RelatedUrl": "https://etcd.io/docs/latest/op-guide/security/", "Remediation": { "Code": { "CLI": "Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master node and set the --cert-file and --key-file arguments appropriately.", diff --git a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json index be80f466e6..06ee066629 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json +++ b/prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json @@ -13,7 +13,7 @@ "ResourceType": "EtcdService", "Description": "This check ensures that etcd uses a unique Certificate Authority (CA) separate from the one used for the overall Kubernetes cluster. This practice enhances the security by restricting access to the etcd database only to clients and peers with certificates issued by the dedicated etcd CA.", "Risk": "Using the same CA for etcd and the Kubernetes cluster can expose etcd to unauthorized access if any certificate issued by the Kubernetes CA is compromised.", - "RelatedUrl": "https://etcd.io/docs/v3.2/op-guide/security/", + "RelatedUrl": "https://etcd.io/docs/latest/op-guide/security/", "Remediation": { "Code": { "CLI": "Configure etcd to use a unique CA by setting the --trusted-ca-file parameter in the etcd pod specification to point to the dedicated etcd CA file. 
Example: --trusted-ca-file=.", From 8f2797622ab74e01c419241a1daff7f231347fd4 Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Thu, 22 Feb 2024 16:17:52 +0000 Subject: [PATCH 16/21] fix logic --- .../services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py | 4 +++- .../etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py index 6a99802fba..42285f590c 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py @@ -13,7 +13,9 @@ def execute(self) -> Check_Report_Kubernetes: report.status = "PASS" report.status_extended = f"Etcd is not configured to use self-signed certificates for TLS in pod {pod.name}." for container in pod.containers.values(): - if "--auto-tls=true" in str(container.command): + if "--auto-tls" in str(container.command) and "--auto-tls=true" in str( + container.command + ): report.status = "FAIL" report.status_extended = f"Etcd is configured to use self-signed certificates for TLS in pod {pod.name}." break diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py index 1340c93aee..9ff0858e8a 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py @@ -13,7 +13,9 @@ def execute(self) -> Check_Report_Kubernetes: report.status = "PASS" report.status_extended = f"Etcd is not using automatically generated self-signed certificates for peer TLS connections in pod {pod.name}." 
for container in pod.containers.values(): - if "--peer-auto-tls=true" in str(container.command): + if "--peer-auto-tls" in str( + container.command + ) and "--peer-auto-tls=true" in str(container.command): report.status = "FAIL" report.status_extended = f"Etcd is using automatically generated self-signed certificates for TLS connections in pod {pod.name}." break From a4a418b159bfd5550f81988f8d5bd45d3aa66044 Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Thu, 22 Feb 2024 16:18:20 +0000 Subject: [PATCH 17/21] fix logic --- .../services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py | 2 +- .../etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py index 42285f590c..2d1d28542c 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py @@ -13,7 +13,7 @@ def execute(self) -> Check_Report_Kubernetes: report.status = "PASS" report.status_extended = f"Etcd is not configured to use self-signed certificates for TLS in pod {pod.name}." 
for container in pod.containers.values(): - if "--auto-tls" in str(container.command) and "--auto-tls=true" in str( + if "--auto-tls" not in str(container.command) and "--auto-tls=true" not in str( container.command ): report.status = "FAIL" diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py index 9ff0858e8a..8355d3f41f 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py @@ -13,9 +13,9 @@ def execute(self) -> Check_Report_Kubernetes: report.status = "PASS" report.status_extended = f"Etcd is not using automatically generated self-signed certificates for peer TLS connections in pod {pod.name}." for container in pod.containers.values(): - if "--peer-auto-tls" in str( + if "--peer-auto-tls" not in str( container.command - ) and "--peer-auto-tls=true" in str(container.command): + ) and "--peer-auto-tls=true" not in str(container.command): report.status = "FAIL" report.status_extended = f"Etcd is using automatically generated self-signed certificates for TLS connections in pod {pod.name}." break From a52d3adc923eced7f2f917acf4897dba4ee5f216 Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Thu, 22 Feb 2024 16:20:19 +0000 Subject: [PATCH 18/21] Revert "fix logic" This reverts commit a4a418b159bfd5550f81988f8d5bd45d3aa66044. 
--- .../services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py | 2 +- .../etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py index 2d1d28542c..42285f590c 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py @@ -13,7 +13,7 @@ def execute(self) -> Check_Report_Kubernetes: report.status = "PASS" report.status_extended = f"Etcd is not configured to use self-signed certificates for TLS in pod {pod.name}." for container in pod.containers.values(): - if "--auto-tls" not in str(container.command) and "--auto-tls=true" not in str( + if "--auto-tls" in str(container.command) and "--auto-tls=true" in str( container.command ): report.status = "FAIL" diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py index 8355d3f41f..9ff0858e8a 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py @@ -13,9 +13,9 @@ def execute(self) -> Check_Report_Kubernetes: report.status = "PASS" report.status_extended = f"Etcd is not using automatically generated self-signed certificates for peer TLS connections in pod {pod.name}." 
for container in pod.containers.values(): - if "--peer-auto-tls" not in str( + if "--peer-auto-tls" in str( container.command - ) and "--peer-auto-tls=true" not in str(container.command): + ) and "--peer-auto-tls=true" in str(container.command): report.status = "FAIL" report.status_extended = f"Etcd is using automatically generated self-signed certificates for TLS connections in pod {pod.name}." break From dc1e8c51671e3a71c5e8ce7916085478dd86c08c Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Thu, 22 Feb 2024 16:20:52 +0000 Subject: [PATCH 19/21] fix logic --- .../services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py | 2 +- .../etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py index 42285f590c..1149f5bcae 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.py @@ -13,7 +13,7 @@ def execute(self) -> Check_Report_Kubernetes: report.status = "PASS" report.status_extended = f"Etcd is not configured to use self-signed certificates for TLS in pod {pod.name}." 
for container in pod.containers.values(): - if "--auto-tls" in str(container.command) and "--auto-tls=true" in str( + if "--auto-tls" in str(container.command) or "--auto-tls=true" in str( container.command ): report.status = "FAIL" diff --git a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py index 9ff0858e8a..336f8b64b8 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.py @@ -15,7 +15,7 @@ def execute(self) -> Check_Report_Kubernetes: for container in pod.containers.values(): if "--peer-auto-tls" in str( container.command - ) and "--peer-auto-tls=true" in str(container.command): + ) or "--peer-auto-tls=true" in str(container.command): report.status = "FAIL" report.status_extended = f"Etcd is using automatically generated self-signed certificates for TLS connections in pod {pod.name}." 
break From fbe8b9806bfea9eef239ea666f8b444f928d6e1c Mon Sep 17 00:00:00 2001 From: Sergio Garcia Date: Thu, 22 Feb 2024 16:40:26 +0000 Subject: [PATCH 20/21] fix logic --- .../etcd/etcd_client_cert_auth/etcd_client_cert_auth.py | 4 +++- .../etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py | 4 +++- .../etcd/etcd_peer_tls_config/etcd_peer_tls_config.py | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py index 152d05c51c..f98a642cd7 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py @@ -15,7 +15,9 @@ def execute(self) -> Check_Report_Kubernetes: f"Etcd has client certificate authentication enabled in pod {pod.name}." ) for container in pod.containers.values(): - if "--client-cert-auth=true" not in str(container.command): + if "--client-cert-auth=true" not in str( + container.command + ) and "--client-cert-auth=true" not in str(container.command): report.status = "FAIL" report.status_extended = f"Etcd does not have client certificate authentication enabled in pod {pod.name}." 
break diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py index f844efffe1..5a3ba8d76f 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.py @@ -13,7 +13,9 @@ def execute(self) -> Check_Report_Kubernetes: report.status = "PASS" report.status_extended = f"Etcd is configured for peer client certificate authentication in pod {pod.name}." for container in pod.containers.values(): - if "--peer-client-cert-auth=true" not in str(container.command): + if "--peer-client-cert-auth" not in str( + container.command + ) and "--peer-client-cert-auth=true" not in str(container.command): report.status = "FAIL" report.status_extended = f"Etcd does not have peer client certificate authentication configured in pod {pod.name}." break diff --git a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.py b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.py index d60476192b..500c45e8ee 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.py @@ -17,7 +17,7 @@ def execute(self) -> Check_Report_Kubernetes: for container in pod.containers.values(): if "--peer-cert-file" not in str( container.command - ) or "--peer-key-file" not in str(container.command): + ) and "--peer-key-file" not in str(container.command): report.status = "FAIL" report.status_extended = f"Etcd does not have TLS configured for peer connections in pod {pod.name}." 
break From 93517579a86b00beb41a2ec0d9978795467ab8e3 Mon Sep 17 00:00:00 2001 From: Pepe Fagoaga Date: Thu, 22 Feb 2024 17:42:34 +0100 Subject: [PATCH 21/21] fix: typo --- .../etcd/etcd_client_cert_auth/etcd_client_cert_auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py index f98a642cd7..f4d217ac62 100644 --- a/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py +++ b/prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.py @@ -15,7 +15,7 @@ def execute(self) -> Check_Report_Kubernetes: f"Etcd has client certificate authentication enabled in pod {pod.name}." ) for container in pod.containers.values(): - if "--client-cert-auth=true" not in str( + if "--client-cert-auth" not in str( container.command ) and "--client-cert-auth=true" not in str(container.command): report.status = "FAIL"