diff --git a/translator/app/translator/platforms/base/lucene/mapping.py b/translator/app/translator/platforms/base/lucene/mapping.py
index a58d51d1..f2a6615e 100644
--- a/translator/app/translator/platforms/base/lucene/mapping.py
+++ b/translator/app/translator/platforms/base/lucene/mapping.py
@@ -22,7 +22,10 @@ def prepare_log_source_signature(self, mapping: dict) -> LuceneLogSourceSignature:
         return LuceneLogSourceSignature(indices=indices, default_source=default_log_source)
 
     def get_suitable_source_mappings(
-        self, field_names: list[str], index: Optional[list[str]] = None
+        self,
+        field_names: list[str],
+        index: Optional[list[str]] = None,
+        **kwargs,  # noqa: ARG002
     ) -> list[SourceMapping]:
         suitable_source_mappings = []
         for source_mapping in self._source_mappings.values():
diff --git a/translator/app/translator/platforms/base/lucene/renders/lucene.py b/translator/app/translator/platforms/base/lucene/renders/lucene.py
index 6cd789bd..82450b18 100644
--- a/translator/app/translator/platforms/base/lucene/renders/lucene.py
+++ b/translator/app/translator/platforms/base/lucene/renders/lucene.py
@@ -42,13 +42,13 @@ def _pre_process_values_list(
     ) -> list[str]:
         value_type = self.__get_value_type(field, value_type)
         processed = []
-        for v in values:
-            if isinstance(v, StrValue):
-                processed.append(self.str_value_manager.from_container_to_str(v, value_type))
-            elif isinstance(v, str):
-                processed.append(self.str_value_manager.escape_manager.escape(v, value_type))
+        for val in values:
+            if isinstance(val, StrValue):
+                processed.append(self.str_value_manager.from_container_to_str(val, value_type))
+            elif isinstance(val, str):
+                processed.append(self.str_value_manager.escape_manager.escape(val, value_type))
             else:
-                processed.append(str(v))
+                processed.append(str(val))
         return processed
 
     def _pre_process_value(
@@ -87,25 +87,32 @@ def not_equal_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
 
     def contains_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
         if isinstance(value, list):
-            values = self.or_token.join(f"*{v}*" for v in self._pre_process_values_list(field, value))
+            values = self.or_token.join(f"*{val}*" for val in self._pre_process_values_list(field, value))
             return f"{field}:({values})"
         return f"{field}:*{self._pre_process_value(field, value)}*"
 
     def endswith_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
         if isinstance(value, list):
-            values = self.or_token.join(f"*{v}" for v in self._pre_process_values_list(field, value))
+            values = self.or_token.join(f"*{val}" for val in self._pre_process_values_list(field, value))
             return f"{field}:({values})"
         return f"{field}:*{self._pre_process_value(field, value)}"
 
     def startswith_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
         if isinstance(value, list):
-            values = self.or_token.join(f"{v}*" for v in self._pre_process_values_list(field, value))
+            values = self.or_token.join(f"{val}*" for val in self._pre_process_values_list(field, value))
             return f"{field}:({values})"
         return f"{field}:{self._pre_process_value(field, value)}*"
 
     def regex_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
         if isinstance(value, list):
-            return f"({self.or_token.join(self.regex_modifier(field=field, value=v) for v in value)})"
+            values = []
+            for val in value:
+                values.append(
+                    f"/{self._pre_process_value(field, val, value_type=ValueType.regex_value)}/"
+                    if isinstance(val, StrValue)
+                    else f"/{val}/"
+                )
+            return f"{field}:({self.or_token.join(values)})"
 
         if isinstance(value, StrValue):
             return f"{field}:/{self._pre_process_value(field, value, value_type=ValueType.regex_value)}/"
@@ -114,7 +121,7 @@ def regex_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
 
     def keywords(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
         if isinstance(value, list):
-            return f"({self.or_token.join(self.keywords(field=field, value=v) for v in value)})"
+            return f"({self.or_token.join(self.keywords(field=field, value=val) for val in value)})"
         return f"*{self._pre_process_value(field, value)}*"
 
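Review note: the list branch of `regex_modifier` no longer recurses per value (which produced `(field:/a/ OR field:/b/)`); it now groups all alternatives under a single field, and only `StrValue` containers go through value pre-processing. A minimal runnable sketch of the intended shape, using a hypothetical `_pre_process_value` stand-in and assuming `or_token` renders as ` OR `:

```python
# Sketch only: StrValue and _pre_process_value stand in for the real
# implementations in app.translator.core.str_value_manager / LuceneFieldValue.
class StrValue(str):
    """Container marking values that carry escaping metadata."""

def _pre_process_value(field: str, value: str) -> str:
    return value  # placeholder: the real method applies regex escaping rules

def regex_modifier(field: str, value) -> str:
    if isinstance(value, list):
        values = [
            f"/{_pre_process_value(field, val)}/" if isinstance(val, StrValue) else f"/{val}/"
            for val in value
        ]
        return f"{field}:({' OR '.join(values)})"  # assumes or_token joins as " OR "
    if isinstance(value, StrValue):
        return f"{field}:/{_pre_process_value(field, value)}/"
    return f"{field}:/{value}/"

print(regex_modifier("process.name", [StrValue(r"cmd\.exe"), "powershell.*"]))
# process.name:(/cmd\.exe/ OR /powershell.*/)
```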
f"{field}:/{self._pre_process_value(field, value, value_type=ValueType.regex_value)}/" @@ -114,7 +121,7 @@ def regex_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str: def keywords(self, field: str, value: DEFAULT_VALUE_TYPE) -> str: if isinstance(value, list): - return f"({self.or_token.join(self.keywords(field=field, value=v) for v in value)})" + return f"({self.or_token.join(self.keywords(field=field, value=val) for val in value)})" return f"*{self._pre_process_value(field, value)}*" diff --git a/translator/app/translator/platforms/base/spl/tokenizer.py b/translator/app/translator/platforms/base/spl/tokenizer.py index 1730d619..64b6b014 100644 --- a/translator/app/translator/platforms/base/spl/tokenizer.py +++ b/translator/app/translator/platforms/base/spl/tokenizer.py @@ -42,7 +42,7 @@ class SplTokenizer(QueryTokenizer, ANDLogicOperatorMixin): field_pattern = r"(?P[a-zA-Z0-9\.\-_\{\}]+)" num_value_pattern = rf"(?P<{ValueType.number_value}>\d+(?:\.\d+)*)(?=$|\s|\))" - double_quotes_value_pattern = rf'"(?P<{ValueType.double_quotes_value}>(?:[:a-zA-Z\*0-9=+%#\-_/,;`\?~‘○×\'\.<>$&^@!\]\[\(\)\{{\}}\s]|\\\"|\\)*)"\s*' # noqa: E501 + double_quotes_value_pattern = rf'"(?P<{ValueType.double_quotes_value}>(?:[:a-zA-Z\*0-9=+%#\-_/,;`\?~‘○×\'\.<>$&^@!\]\[\(\)\{{\}}\s]|\\\"|\\)*)"\s*' # noqa: E501, RUF001 single_quotes_value_pattern = ( rf"'(?P<{ValueType.single_quotes_value}>(?:[:a-zA-Z\*0-9=+%#\-_/,;\"\.<>$&^@!\(\)\{{\}}\s]|\\\'|\\)*)'\s*" ) diff --git a/translator/app/translator/platforms/elasticsearch/renders/detection_rule.py b/translator/app/translator/platforms/elasticsearch/renders/detection_rule.py index ae9876c1..7211f9b9 100644 --- a/translator/app/translator/platforms/elasticsearch/renders/detection_rule.py +++ b/translator/app/translator/platforms/elasticsearch/renders/detection_rule.py @@ -18,9 +18,10 @@ """ import copy -import json from typing import Optional, Union +import ujson + from app.translator.core.mapping import SourceMapping from app.translator.core.mitre import MitreConfig from app.translator.core.models.parser_output import MetaInfoContainer @@ -113,7 +114,7 @@ def finalize_query( "false_positives": meta_info.false_positives, } ) - rule_str = json.dumps(rule, indent=4, sort_keys=False, ensure_ascii=False) + rule_str = ujson.dumps(rule, indent=4, sort_keys=False, ensure_ascii=False) if not_supported_functions: rendered_not_supported = self.render_not_supported_functions(not_supported_functions) return rule_str + rendered_not_supported diff --git a/translator/app/translator/platforms/elasticsearch/renders/kibana.py b/translator/app/translator/platforms/elasticsearch/renders/kibana.py index 20957e5c..af5b8438 100644 --- a/translator/app/translator/platforms/elasticsearch/renders/kibana.py +++ b/translator/app/translator/platforms/elasticsearch/renders/kibana.py @@ -17,9 +17,10 @@ ----------------------------------------------------------------- """ import copy -import json from typing import Optional +import ujson + from app.translator.core.mapping import SourceMapping from app.translator.core.models.parser_output import MetaInfoContainer from app.translator.core.models.platform_details import PlatformDetails @@ -56,7 +57,7 @@ def finalize_query( query = super().finalize_query(prefix=prefix, query=query, functions=functions) search_source = copy.deepcopy(KIBANA_SEARCH_SOURCE_JSON) search_source["query"]["query_string"]["query"] = query - dumped_rule = json.dumps(search_source, sort_keys=False) + dumped_rule = ujson.dumps(search_source, sort_keys=False, 
diff --git a/translator/app/translator/platforms/elasticsearch/renders/xpack_watcher.py b/translator/app/translator/platforms/elasticsearch/renders/xpack_watcher.py
index 1840b4e5..099a480c 100644
--- a/translator/app/translator/platforms/elasticsearch/renders/xpack_watcher.py
+++ b/translator/app/translator/platforms/elasticsearch/renders/xpack_watcher.py
@@ -17,9 +17,10 @@
 -----------------------------------------------------------------
 """
 import copy
-import json
 from typing import Optional
 
+import ujson
+
 from app.translator.core.mapping import SourceMapping
 from app.translator.core.models.parser_output import MetaInfoContainer
 from app.translator.core.models.platform_details import PlatformDetails
@@ -72,7 +73,7 @@ def finalize_query(
         indices = source_mapping and [str(source_mapping.log_source_signature)] or []
         rule["input"]["search"]["request"]["indices"] = indices
         rule["actions"]["send_email"]["email"]["subject"] = meta_info.title
-        rule_str = json.dumps(rule, indent=4, sort_keys=False)
+        rule_str = ujson.dumps(rule, indent=4, sort_keys=False)
         if not_supported_functions:
             rendered_not_supported = self.render_not_supported_functions(not_supported_functions)
             return rule_str + rendered_not_supported
diff --git a/translator/app/translator/platforms/opensearch/renders/opensearch.py b/translator/app/translator/platforms/opensearch/renders/opensearch.py
index fce6bca5..f8103456 100644
--- a/translator/app/translator/platforms/opensearch/renders/opensearch.py
+++ b/translator/app/translator/platforms/opensearch/renders/opensearch.py
@@ -21,6 +21,7 @@
 from app.translator.const import DEFAULT_VALUE_TYPE
 from app.translator.core.custom_types.values import ValueType
 from app.translator.core.models.platform_details import PlatformDetails
+from app.translator.core.str_value_manager import StrValue
 from app.translator.platforms.base.lucene.renders.lucene import LuceneFieldValue, LuceneQueryRender
 from app.translator.platforms.opensearch.const import opensearch_query_details
 from app.translator.platforms.opensearch.mapping import OpenSearchMappings, opensearch_mappings
@@ -31,7 +32,7 @@ class OpenSearchFieldValue(LuceneFieldValue):
 
     def equal_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
         if isinstance(value, list):
-            values = self.or_token.join(f'"{v}"' for v in self._pre_process_values_list(field, value))
+            values = self.or_token.join(f'"{val}"' for val in self._pre_process_values_list(field, value))
             return f"{field}:({values})"
         return f'{field}:"{self._pre_process_value(field, value)}"'
@@ -49,36 +50,47 @@ def greater_or_equal_modifier(self, field: str, value: Union[int, str]) -> str:
 
     def not_equal_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
         if isinstance(value, list):
-            values = self.or_token.join(f'"{v}"' for v in self._pre_process_values_list(field, value))
+            values = self.or_token.join(f'"{val}"' for val in self._pre_process_values_list(field, value))
             return f"NOT ({field} = ({values})"
         return f'NOT ({field} = "{self._pre_process_value(field, value)}")'
 
     def contains_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
         if isinstance(value, list):
-            values = self.or_token.join(f'"*{v}*"' for v in self._pre_process_values_list(field, value))
+            values = self.or_token.join(f'"*{val}*"' for val in self._pre_process_values_list(field, value))
             return f"{field}:({values})"
         return f'{field}:"*{self._pre_process_value(field, value)}*"'
 
     def endswith_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
         if isinstance(value, list):
-            values = self.or_token.join(f'"*{v}"' for v in self._pre_process_values_list(field, value))
+            values = self.or_token.join(f'"*{val}"' for val in self._pre_process_values_list(field, value))
             return f"{field}:({values})"
         return f'{field}:"*{self._pre_process_value(field, value)}"'
 
     def startswith_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
         if isinstance(value, list):
-            values = self.or_token.join(f'"{v}*"' for v in self._pre_process_values_list(field, value))
+            values = self.or_token.join(f'"{val}*"' for val in self._pre_process_values_list(field, value))
             return f"{field}:({values})"
         return f'{field}:"{self._pre_process_value(field, value)}*"'
 
     def regex_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
         if isinstance(value, list):
-            return f"({self.or_token.join(self.regex_modifier(field=field, value=v) for v in value)})"
-        return f'{field}:"/{self._pre_process_value(field, value, value_type=ValueType.regex_value)}/"'
+            values = []
+            for val in value:
+                values.append(
+                    f'"/{self._pre_process_value(field, val, value_type=ValueType.regex_value)}/"'
+                    if isinstance(val, StrValue)
+                    else f'"/{val}/"'
+                )
+            return f"{field}:({self.or_token.join(values)})"
+
+        if isinstance(value, StrValue):
+            return f'{field}:"/{self._pre_process_value(field, value, value_type=ValueType.regex_value)}/"'
+
+        return f'{field}:"/{value}/"'
 
     def keywords(self, field: str, value: DEFAULT_VALUE_TYPE) -> str:
         if isinstance(value, list):
-            return f"({self.or_token.join(self.keywords(field=field, value=v) for v in value)})"
+            return f"({self.or_token.join(self.keywords(field=field, value=val) for val in value)})"
         return f'"*{self._pre_process_value(field, value)}*"'
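Review note: `OpenSearchFieldValue.regex_modifier` mirrors the Lucene rewrite above, except each regex alternative is additionally wrapped in double quotes. A simplified sketch of the expected output (pre-processing of `StrValue` containers elided, ` OR ` assumed for `or_token`):

```python
def regex_modifier(field: str, value) -> str:
    # Simplified: the real method pre-processes StrValue containers first.
    if isinstance(value, list):
        values = [f'"/{val}/"' for val in value]
        return f"{field}:({' OR '.join(values)})"
    return f'{field}:"/{value}/"'

print(regex_modifier("process.name", [r"cmd\.exe", r"powershell.*"]))
# process.name:("/cmd\.exe/" OR "/powershell.*/")
```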
"{self._pre_process_value(field, value)}")' def contains_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str: if isinstance(value, list): - values = self.or_token.join(f'"*{v}*"' for v in self._pre_process_values_list(field, value)) + values = self.or_token.join(f'"*{val}*"' for val in self._pre_process_values_list(field, value)) return f"{field}:({values})" return f'{field}:"*{self._pre_process_value(field, value)}*"' def endswith_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str: if isinstance(value, list): - values = self.or_token.join(f'"*{v}"' for v in self._pre_process_values_list(field, value)) + values = self.or_token.join(f'"*{val}"' for val in self._pre_process_values_list(field, value)) return f"{field}:({values})" return f'{field}:"*{self._pre_process_value(field, value)}"' def startswith_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str: if isinstance(value, list): - values = self.or_token.join(f'"{v}*"' for v in self._pre_process_values_list(field, value)) + values = self.or_token.join(f'"{val}*"' for val in self._pre_process_values_list(field, value)) return f"{field}:({values})" return f'{field}:"{self._pre_process_value(field, value)}*"' def regex_modifier(self, field: str, value: DEFAULT_VALUE_TYPE) -> str: if isinstance(value, list): - return f"({self.or_token.join(self.regex_modifier(field=field, value=v) for v in value)})" - return f'{field}:"/{self._pre_process_value(field, value, value_type=ValueType.regex_value)}/"' + values = [] + for val in value: + values.append( + f'"/{self._pre_process_value(field, val, value_type=ValueType.regex_value)}/"' + if isinstance(val, StrValue) + else f'"/{val}/"' + ) + return f"{field}:({self.or_token.join(values)})" + + if isinstance(value, StrValue): + return f'{field}:"/{self._pre_process_value(field, value, value_type=ValueType.regex_value)}/"' + + return f'{field}:"/{value}/"' def keywords(self, field: str, value: DEFAULT_VALUE_TYPE) -> str: if isinstance(value, list): - return f"({self.or_token.join(self.keywords(field=field, value=v) for v in value)})" + return f"({self.or_token.join(self.keywords(field=field, value=val) for val in value)})" return f'"*{self._pre_process_value(field, value)}*"' diff --git a/translator/app/translator/platforms/opensearch/renders/opensearch_rule.py b/translator/app/translator/platforms/opensearch/renders/opensearch_rule.py index d2d591de..f94c48c2 100644 --- a/translator/app/translator/platforms/opensearch/renders/opensearch_rule.py +++ b/translator/app/translator/platforms/opensearch/renders/opensearch_rule.py @@ -17,9 +17,10 @@ ----------------------------------------------------------------- """ import copy -import json from typing import Optional +import ujson + from app.translator.core.custom_types.meta_info import SeverityType from app.translator.core.mapping import SourceMapping from app.translator.core.models.parser_output import MetaInfoContainer @@ -63,7 +64,7 @@ def finalize_query( rule["inputs"][0]["search"]["query"]["query"]["bool"]["must"][0]["query_string"]["query"] = query rule["triggers"][0]["name"] = meta_info.title rule["triggers"][0]["severity"] = _SEVERITIES_MAP[meta_info.severity] - rule_str = json.dumps(rule, indent=4, sort_keys=False) + rule_str = ujson.dumps(rule, indent=4, sort_keys=False) if not_supported_functions: rendered_not_supported = self.render_not_supported_functions(not_supported_functions) return rule_str + rendered_not_supported diff --git a/translator/requirements.txt b/translator/requirements.txt index 064c959c..124d702e 
100644 --- a/translator/requirements.txt +++ b/translator/requirements.txt @@ -4,3 +4,4 @@ pydantic~=1.10.13 PyYAML~=6.0.1 colorama~=0.4.6 ruff==0.1.13 +ujson==5.9.0 pFad - Phonifier reborn
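Review note: the new `ujson==5.9.0` pin is only exercised with keyword arguments ujson actually supports (`indent`, `sort_keys`, `ensure_ascii`, `escape_forward_slashes`), so it acts as a faster drop-in for the stdlib calls it replaces. A quick sanity check one might run:

```python
import json
import ujson

rule = {"title": "Example rule", "severity": "high", "tags": ["attack.t1059"]}

dumped = ujson.dumps(rule, indent=4, sort_keys=False, ensure_ascii=False)

# ujson's indented output parses back to the same object the stdlib produces.
assert json.loads(dumped) == json.loads(json.dumps(rule))
print(dumped)
```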
