diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 4cce788e7..0462ccfb7 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -5,5 +5,4 @@ # the repo. Unless a later match takes precedence, # @global-owner1 and @global-owner2 will be requested for # review when someone opens a pull request. -* @devanshah2 @Tal-Daniel @ofer-haim @natalygmkibm @apurva-birajdar @itai-g-weather-com @RefaelAdi @PratikshaSonawane @mwnovak-ibm @hadarkorny @doryo @chirag-ibm @rasikashete3 @pankajkumaribm @taees-eimouri @pankajkumar @piyush-desai-ibm @ShalakaKulkarni15 @JingqiuDu - +* @devanshah2 @Tal-Daniel @ofer-haim @natalygmkibm @apurva-birajdar @itai-g-weather-com @RefaelAdi @PratikshaSonawane @mwnovak-ibm @hadarkorny @doryo @chirag-ibm @rasikashete3 @pankajkumaribm @taees-eimouri @pankajkumar @piyush-desai-ibm @ShalakaKulkarni15 @JingqiuDu @rupathil @zeeIBM @Rose-Kaur diff --git a/build/defaultOfflinePackagePlugins.txt b/build/defaultOfflinePackagePlugins.txt index 3a906421f..bd4f006a3 100644 --- a/build/defaultOfflinePackagePlugins.txt +++ b/build/defaultOfflinePackagePlugins.txt @@ -36,9 +36,11 @@ filter-plugin/logstash-filter-intersystems-iris-guardium/logstash-filter-intersy filter-plugin/logstash-filter-postgres-ibmcloud-guardium/logstash-filter-icd_postgresql_guardium_filter filter-plugin/logstash-filter-mysql-azure-guardium/logstash-filter-azure_mysql_guardium_filter filter-plugin/logstash-filter-scylldb-guardium/logstash-filter-scylladb_guardium_filter +filter-plugin/logstash-filter-databricks-guardium/logstash-filter-databricks_guardium_filter +filter-plugin/logstash-filter-trino-guardium/logstash-filter-trino_guardium_filter +filter-plugin/logstash-filter-capella-guardium/logstash-filter-capella_guardium_filter +filter-plugin/logstash-filter-opensearch-guardium/logstash-filter-opensearch_guardium_filter +input-plugin/logstash-input-couchbase-capella/logstash-input-couchbase_capella_input +input-plugin/logstash-input-http/logstash-offline-input-http-plugins input-plugin/logstash-input-mongo-atlas/logstash-input-mongo_atlas_input -input-plugin/logstash-input-couchbase-capella/logstash-input-couchbase-capella_input -input-plugin/logstash-input-http/logstash-input-http_input -filter-plugin/logstash-filter-capella-guardium/logstash-filter-capella-guardium_filter -filter-plugin/logstash-filter-databricks-guardium/logstash-filter-databricks-guardium_filter -filter-plugin/logstash-filter-trino-guardium/logstash-filter-trino-guardium_filter \ No newline at end of file + diff --git a/build/pluginsToBuild_GDP.txt b/build/pluginsToBuild_GDP.txt index 18a9481c5..f90b4450f 100644 --- a/build/pluginsToBuild_GDP.txt +++ b/build/pluginsToBuild_GDP.txt @@ -36,9 +36,10 @@ filter-plugin/logstash-filter-intersystems-iris-guardium filter-plugin/logstash-filter-postgres-ibmcloud-guardium filter-plugin/logstash-filter-mysql-azure-guardium filter-plugin/logstash-filter-scylldb-guardium -input-plugin/logstash-input-mongo-atlas +filter-plugin/logstash-filter-databricks-guardium +filter-plugin/logstash-filter-trino-guardium +filter-plugin/logstash-filter-capella-guardium +filter-plugin/logstash-filter-opensearch-guardium input-plugin/logstash-input-couchbase-capella input-plugin/logstash-input-http -filter-plugin/logstash-filter-capella-guardium -filter-plugin/logstash-filter-databricks-guardium -filter-plugin/logstash-filter-trino-guardium \ No newline at end of file +input-plugin/logstash-input-mongo-atlas diff --git a/build/verifiedUCPlugins_gdp.txt b/build/verifiedUCPlugins_gdp.txt index a50cc825a..df6a332e3 
100644 --- a/build/verifiedUCPlugins_gdp.txt +++ b/build/verifiedUCPlugins_gdp.txt @@ -43,12 +43,18 @@ filter-plugin/logstash-filter-mysql-guardium/MySQLOverSyslogPackage #Other filter-plugin/logstash-filter-mongodb-guardium/MongodbOverMongoAtlasPackage filter-plugin/logstash-filter-azure-postgresql-guardium/AzurePostgresqlOverAzureEventHub +filter-plugin/logstash-filter-databricks-guardium/AzureDatabricksOverAzureEventHub +filter-plugin/logstash-filter-trino-guardium/TrinoOverSyslogPackage +filter-plugin/logstash-filter-capella-guardium/CapellaCouchbaseOverCapellaPackage +filter-plugin/logstash-filter-opensearch-guardium/OpenSearchOverCloudwatchPackage #Input plug-ins input-plugin/logstash-input-azure-event-hubs/AzureEventHubsInputPackage input-plugin/logstash-input-beats/FilebeatInputPackage +input-plugin/logstash-input-couchbase-capella/InputCouchbaseCapellaPackage input-plugin/logstash-input-cloudwatch-logs/CloudwatchLogsInputPackage input-plugin/logstash-input-jdbc/JdbcInputPackage input-plugin/logstash-input-mongo-atlas/InputMongoAtlasPackage input-plugin/logstash-input-sqs/SQSInputPackage input-plugin/logstash-input-tcp-syslog/SyslogInputPackage -input-plugin/logstash-input-google-pubsub/GooglePubSubPackage \ No newline at end of file +input-plugin/logstash-input-http/httpInputPackage +input-plugin/logstash-input-google-pubsub/GooglePubSubPackage diff --git a/common/build.gradle b/common/build.gradle index faae36a69..e3fd9eaab 100644 --- a/common/build.gradle +++ b/common/build.gradle @@ -64,3 +64,11 @@ artifacts { archives sourcesJar archives javadocJar } + +task copyDependencies(type: Copy) { + description 'Copies all runtime dependencies into build/libs directory' + from configurations.runtimeClasspath + into "${buildDir}/libs" +} + +jar.finalizedBy(copyDependencies) diff --git a/filter-plugin/logstash-filter-azure-sql-guardium/README.md b/filter-plugin/logstash-filter-azure-sql-guardium/README.md index 758b40a9d..bf4f17f82 100644 --- a/filter-plugin/logstash-filter-azure-sql-guardium/README.md +++ b/filter-plugin/logstash-filter-azure-sql-guardium/README.md @@ -108,7 +108,7 @@ The Guardium universal connector is the Guardium entry point for native audit lo **Note**: For Guardium Data Protection version 11.4 without appliance bundle 11.0p490 or prior or Guardium Data Protection version 11.5 without appliance bundle 11.0p540 or prior, download the [Azure-SQL-Offline-Package.zip](https://github.com/IBM/universal-connectors/releases/download/v1.5.6/logstash-filter-azuresql_guardium_plugin_filter.zip) plug-in. (Do not unzip the offline-package file throughout the procedure). 
-• Download the mssql-jdbc-7.4.1.jre8 from [here](https://jar-download.com/artifacts/com.microsoft.sqlserver/mssql-jdbc/7.4.1.jre8) +• Download the mssql-jdbc-7.4.1.jre8 from [here](https://repo1.maven.org/maven2/com/microsoft/sqlserver/mssql-jdbc/7.4.1.jre8/mssql-jdbc-7.4.1.jre8.jar) #### Configuration diff --git a/filter-plugin/logstash-filter-capella-guardium/capellaCouchbaseOverCapellaPackage/capella/capellaCouchbase.conf b/filter-plugin/logstash-filter-capella-guardium/CapellaCouchbaseOverCapellaPackage/capellaCouchbase.conf similarity index 100% rename from filter-plugin/logstash-filter-capella-guardium/capellaCouchbaseOverCapellaPackage/capella/capellaCouchbase.conf rename to filter-plugin/logstash-filter-capella-guardium/CapellaCouchbaseOverCapellaPackage/capellaCouchbase.conf diff --git a/filter-plugin/logstash-filter-capella-guardium/README.md b/filter-plugin/logstash-filter-capella-guardium/README.md index fff78f058..17f9f65d7 100644 --- a/filter-plugin/logstash-filter-capella-guardium/README.md +++ b/filter-plugin/logstash-filter-capella-guardium/README.md @@ -118,7 +118,7 @@ enforcements. Configure Guardium to read the native audit/data_access logs by cu the [logstash-input-couchbase_capella_input](../../input-plugin/logstash-input-couchbase-capella/logstash-input-couchbase_capella_input.zip) plug-in. * Download - the [logstash-filter-capella_guardium_filter](capellaCouchbaseOverCapellaPackage/capella/logstash-filter-capella_guardium_filter.zip) + the [logstash-filter-capella_guardium_filter](logstash-filter-capella_guardium_filter.zip) plug-in. * Capella-Guardium Logstash filter plug-in is automatically available with Guardium Data Protection versions 12.x, 11.4 with appliance bundle 11.0p490 or later or Guardium Data Protection version 11.5 with appliance bundle 11.0p540 or @@ -129,15 +129,15 @@ enforcements. Configure Guardium to read the native audit/data_access logs by cu 1. On the collector, go to ```Setup``` > ```Tools and Views``` > ```Configure Universal Connector```. 2. Enable the universal connector if it is disabled. 3. Click ```Upload File``` and select the - offline [logstash-filter-capella_guardium_filter](capellaCouchbaseOverCapellaPackage/capella/logstash-filter-capella_guardium_filter.zip) + offline [logstash-filter-capella_guardium_filter](logstash-filter-capella_guardium_filter.zip) plug-in. After it is uploaded, click ```OK```. 4. Click the Plus sign to open the Connector Configuration dialog box. 5. Type a name in the Connector name field. 6. Update the input section to add the details from - the [capellaCouchbase.conf](capellaCouchbaseOverCapellaPackage/capella/capellaCouchbase.conf) file's input part, + the [capellaCouchbase.conf](CapellaCouchbaseOverCapellaPackage/capellaCouchbase.conf) file's input part, omitting the keyword "input{" at the beginning and its corresponding "}" at the end. 7. Update the filter section to add the details from - the [capellaCouchbase.conf](capellaCouchbaseOverCapellaPackage/capella/capellaCouchbase.conf) file's filter part, + the [capellaCouchbase.conf](CapellaCouchbaseOverCapellaPackage/capellaCouchbase.conf) file's filter part, omitting the keyword "filter{" at the beginning and its corresponding "}" at the end. 8. The 'type' fields should match in the input and filter configuration sections. This field should be unique for every individual connector added. 
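+ As an illustration of step 8 above, here is a minimal sketch of how the matching 'type' value ties an input section to its filter section. The input plug-in, port, and type string below are hypothetical placeholders rather than values from capellaCouchbase.conf (and the filter name simply follows this package's plug-in naming); in practice, copy both sections from that file: + ``` + input { +   http { +     port => 5080                      # hypothetical port +     type => "couchbase_capella"       # unique per connector +   } + } + filter { +   if [type] == "couchbase_capella" {  # must equal the input's type value +     capella_guardium_filter {} +   } + } + ``` + The same convention applies to every connector in this repository: choose one unique type string per connector, and gate the filter on it.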
diff --git a/filter-plugin/logstash-filter-capella-guardium/capellaCouchbaseOverCapellaPackage/capella/logstash-filter-capella_guardium_filter.zip b/filter-plugin/logstash-filter-capella-guardium/logstash-filter-capella_guardium_filter.zip similarity index 53% rename from filter-plugin/logstash-filter-capella-guardium/capellaCouchbaseOverCapellaPackage/capella/logstash-filter-capella_guardium_filter.zip rename to filter-plugin/logstash-filter-capella-guardium/logstash-filter-capella_guardium_filter.zip index 3aefbc587..3325ac47c 100644 Binary files a/filter-plugin/logstash-filter-capella-guardium/capellaCouchbaseOverCapellaPackage/capella/logstash-filter-capella_guardium_filter.zip and b/filter-plugin/logstash-filter-capella-guardium/logstash-filter-capella_guardium_filter.zip differ diff --git a/filter-plugin/logstash-filter-capella-guardium/src/main/java/com/ibm/guardium/capella/Parser.java b/filter-plugin/logstash-filter-capella-guardium/src/main/java/com/ibm/guardium/capella/Parser.java index 5b0b2f9e4..5cd60c7dd 100644 --- a/filter-plugin/logstash-filter-capella-guardium/src/main/java/com/ibm/guardium/capella/Parser.java +++ b/filter-plugin/logstash-filter-capella-guardium/src/main/java/com/ibm/guardium/capella/Parser.java @@ -130,7 +130,7 @@ protected ExceptionRecord getException(String payload, String sqlString) { return exceptionRecord; } else if (statement != null && !status.contains(SUCCESS_STATUS)) { exceptionRecord.setDescription(serviceName); - exceptionRecord.setSqlString(sqlString); + exceptionRecord.setSqlString(statement); exceptionRecord.setExceptionTypeId(SQL_ERROR); return exceptionRecord; } diff --git a/filter-plugin/logstash-filter-capella-guardium/src/test/java/com/ibm/guardium/capella/ParserTest.java b/filter-plugin/logstash-filter-capella-guardium/src/test/java/com/ibm/guardium/capella/ParserTest.java index 2976af75c..20f26154e 100644 --- a/filter-plugin/logstash-filter-capella-guardium/src/test/java/com/ibm/guardium/capella/ParserTest.java +++ b/filter-plugin/logstash-filter-capella-guardium/src/test/java/com/ibm/guardium/capella/ParserTest.java @@ -753,5 +753,6 @@ void testSQLError() { assertEquals("COUCHB", record.getAccessor().getLanguage()); assertEquals("UNRECOGNIZED statement", record.getException().getDescription()); assertEquals("SQL_ERROR", record.getException().getExceptionTypeId()); + assertEquals("select * fro test;", record.getException().getSqlString()); } } diff --git a/filter-plugin/logstash-filter-mongodb-guardium/MongoDBOverSyslogPackage/mongodbSyslog.conf b/filter-plugin/logstash-filter-mongodb-guardium/MongoDBOverSyslogPackage/mongodbSyslog.conf new file mode 100644 index 000000000..a65597467 --- /dev/null +++ b/filter-plugin/logstash-filter-mongodb-guardium/MongoDBOverSyslogPackage/mongodbSyslog.conf @@ -0,0 +1,54 @@ +#/* +#Copyright 2020-2021 IBM Inc. 
All rights reserved +#SPDX-License-Identifier: Apache-2.0 +#*/ + +input { + tcp { + port => 5001 + type => "syslog-mongodb" + dns_reverse_lookup_enabled => false + ssl_enable => true + # ssl_certificate_authorities => SSL_CERT_AUTH + ssl_cert => "/service/certs/external/tls-syslog.crt" + ssl_key => "/service/certs/external/tls-syslog.key" + ssl_verify => true + } +} + + + +filter { +if [type] == "syslog-mongodb" { + # break apart the message and prepare for what filter expects + grok { + match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:server_hostname} %{SYSLOGPROG:source_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" } + } + + date { + match => [ "timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ] + } + + mutate { rename => { "host" => "server_ip" } } + + # send to filter + mongodb_guardium_filter {} + + # keep original event fields, for debugging + if "_mongoguardium_json_parse_error" not in [tags] { + mutate { remove_field => [ + "message", "syslog_timestamp", "source_program", "program", + "syslog_pid", "syslog_message", + "server_hostname", "client_hostname", "host", + "ecs", "log", "agent", "input"] + } + } +} + + +# uncomment to test events/sec +# metrics { +# meter => "events" +# add_tag => "metric" +# } +} diff --git a/filter-plugin/logstash-filter-mongodb-guardium/MongodbOverFilebeatPackage/mongodbFilebeat.conf b/filter-plugin/logstash-filter-mongodb-guardium/MongodbOverFilebeatPackage/mongodbFilebeat.conf new file mode 100644 index 000000000..26473213d --- /dev/null +++ b/filter-plugin/logstash-filter-mongodb-guardium/MongodbOverFilebeatPackage/mongodbFilebeat.conf @@ -0,0 +1,36 @@ +#/* +#Copyright 2020-2021 IBM Inc. All rights reserved +#SPDX-License-Identifier: Apache-2.0 +#*/ + +input { + beats { + port => + type => "filebeat" + # For SSL over Filebeat, uncomment the following lines after generating an SSL key and a certificate authority (CA) using GuardAPI (see documentation), copy the public certificate authority (CA) to your data source and adjust Filebeat configuration: + #ssl => true + #ssl_certificate => "${SSL_DIR}/cert.pem" + #ssl_key => "${SSL_DIR}/key.pem" + } +} +filter{ + if [type] == "filebeat" and "guc_filter_param_datasource_tag" in [tags] { + mutate { add_field => { "source_program" => "mongod" } } + mutate { add_field => { "server_hostname" => "%{[host][name]}" } } + mutate { add_field => { "server_ip" => "%{[host][ip][0]}" } } + mutate { replace => { "message" => "%{source_program}: %{message}" } } + + mongodb_guardium_filter {} + + if "_mongoguardium_json_parse_error" not in [tags] { + mutate { remove_field => ["message","syslog_timestamp","source_program","program","syslog_pid","syslog_message","server_hostname","client_hostname","host","ecs","log","agent","input"] } + } + } + +# uncomment to test events/sec +# metrics { +# meter => "events" +# add_tag => "metric" +# } +} + diff --git a/filter-plugin/logstash-filter-mongodb-guardium/MongodbOverMongoAtlasPackage/mongodbAtlas.conf b/filter-plugin/logstash-filter-mongodb-guardium/MongodbOverMongoAtlasPackage/mongodbAtlas.conf new file mode 100644 index 000000000..7c9e89f27 --- /dev/null +++ b/filter-plugin/logstash-filter-mongodb-guardium/MongodbOverMongoAtlasPackage/mongodbAtlas.conf @@ -0,0 +1,34 @@ +input { + mongo_atlas_input{ + interval => 300 + public-key => "" + private-key => "" + group-id => "" # example, 61f8b9021d9dcc4b97fbfcf1 + hostname => "" # example, cluster1-shard-00-02.i2jq9.mongodb.net + type => "mongodbatlas" + } +} + +filter { + if [type] ==
"mongodbatlas" { + mutate { add_field => { "source_program" => "mongod" } } + mutate { add_field => { "client_hostname" => "%{[agent][hostname]}" } } + mutate { add_field => { "server_hostname" => "%{hostname}" } } + mutate { add_field => { "server_ip" => "%{[host][ip][0]}" } } + mutate { replace => { "message" => "%{source_program}: %{message}" } } + + mongodb_guardium_filter {} + + # keep original event fields, for debugging + if "_mongoguardium_json_parse_error" not in [tags] { + mutate { remove_field => [ + "message", "syslog_timestamp", + "source_program", "program", + "syslog_pid", "syslog_message", + "server_hostname", "client_hostname", "host", + "ecs", "log", "agent", "input"] + } + } + } +} + diff --git a/filter-plugin/logstash-filter-mongodb-guardium/README.md b/filter-plugin/logstash-filter-mongodb-guardium/README.md index 580c3c76a..1126a0d44 100644 --- a/filter-plugin/logstash-filter-mongodb-guardium/README.md +++ b/filter-plugin/logstash-filter-mongodb-guardium/README.md @@ -60,14 +60,14 @@ The filter plug-in also supports sending errors. For this, MongoDB access contro * The "type" fields should match in the input and the filter configuration sections. This field should be unique for every individual connector added. ## Example -### Filebeat input +### Sample Audit Log A typical original log file looks like: ``` { "atype" : "authCheck", "ts" : { "$date" : "2020-02-16T03:21:58.185-0500" }, "local" : { "ip" : "127.0.30.1", "port" : 0 }, "remote" : { "ip" : "127.0.20.1", "port" : 0 }, "users" : [], "roles" : [], "param" : { "command" : "find", "ns" : "config.transactions", "args" : { "find" : "transactions", "filter" : { "lastWriteDate" : { "$lt" : { "$date" : "2020-02-16T02:51:58.185-0500" } } }, "projection" : { "_id" : 1 }, "sort" : { "_id" : 1 }, "$db" : "config" } }, "result" : 0 } ``` -The Filebeat version of the same file looks like: +### The Filebeat version of the same Sample Audit Log looks like: ``` { "@version" => "1", @@ -105,46 +105,6 @@ The Filebeat version of the same file looks like: } ``` -## Filter result -The filter tweaks the event by adding a _GuardRecord_ field to it with a JSON representation of a Guardium record object. 
As the filter takes the responsiblity of breaking the database command into its atomic parts, it details the construct object with the parsed command structure: - { - - "sequence" => 0, - "GuardRecord" => "{"sessionId":"mV20eHvvRha2ELTeqJxQJg\u003d\u003d","dbName":"admin","appUserName":"","time":{"timstamp":1591883051070,"minOffsetFromGMT":-240,"minDst":0},"sessionLocator":{"clientIp":"9.148.202.94","clientPort":60185,"serverIp":"9.70.147.59","serverPort":27017,"isIpv6":false,"clientIpv6":"","serverIpv6":""},"accessor":{"dbUser":"realAdmin ","serverType":"MongoDB","serverOs":"","clientOs":"","clientHostName":"","serverHostName":"","commProtocol":"","dbProtocol":"MongoDB native audit","dbProtocolVersion":"","osUser":"","sourceProgram":"","client_mac":"","serverDescription":"","serviceName":"admin","language":"FREE_TEXT","dataType":"CONSTRUCT"},"data":{"construct":{"sentences":[{"verb":"find","objects":[{"name":"USERS","type":"collection","fields":[],"schema":""}],"descendants":[],"fields":[]}],"fullSql":"{\"atype\":\"authCheck\",\"ts\":{\"$date\":\"2020-06-11T09:44:11.070-0400\"},\"local\":{\"ip\":\"9.70.147.59\",\"port\":27017},\"remote\":{\"ip\":\"9.148.202.94\",\"port\":60185},\"users\":[{\"user\":\"realAdmin\",\"db\":\"admin\"}],\"roles\":[{\"role\":\"readWriteAnyDatabase\",\"db\":\"admin\"},{\"role\":\"userAdminAnyDatabase\",\"db\":\"admin\"}],\"param\":{\"command\":\"find\",\"ns\":\"admin.USERS\",\"args\":{\"find\":\"USERS\",\"filter\":{},\"lsid\":{\"id\":{\"$binary\":\"mV20eHvvRha2ELTeqJxQJg\u003d\u003d\",\"$type\":\"04\"}},\"$db\":\"admin\",\"$readPreference\":{\"mode\":\"primaryPreferred\"}}},\"result\":0}","redactedSensitiveDataSql":"{\"atype\":\"authCheck\",\"ts\":{\"$date\":\"2020-06-11T09:44:11.070-0400\"},\"local\":{\"ip\":\"9.70.147.59\",\"port\":27017},\"remote\":{\"ip\":\"9.148.202.94\",\"port\":60185},\"users\":[{\"user\":\"realAdmin\",\"db\":\"admin\"}],\"roles\":[{\"role\":\"readWriteAnyDatabase\",\"db\":\"admin\"},{\"role\":\"userAdminAnyDatabase\",\"db\":\"admin\"}],\"param\":{\"command\":\"find\",\"ns\":\"admin.USERS\",\"args\":{\"filter\":{},\"lsid\":{\"id\":{\"$binary\":\"?\",\"$type\":\"?\"}},\"$readPreference\":{\"mode\":\"?\"},\"find\":\"USERS\",\"$db\":\"admin\"}},\"result\":0}"},"originalSqlCommand":""},"exception":null}", - "@version" => "1", - "@timestamp" => 2020-02-25T12:32:16.314Z, - "type" => "syslog", - "timestamp" => "2020-01-26T10:47:41.225-0500" - } - -This Guardium record, which is added to Logstash event after the filter, is examined and handled by Guardium universal connector (in an output stage) and inserted into Guardium. - -If the event message is not related to MongoDB, the event is tagged with "_mongoguardium_skip_not_mongodb" (not removed from the pipeline). If it is an event from MongoDB but JSON parsing fails, the event is tagged with "_mongoguardium_json_parse_error" but not removed (this may happen if the syslog message is too long and was truncated). These tags can be useful for debugging purposes. - - -To build and create an updated GEM of this filter plug-in which can be installed onto Logstash: -1. Build Logstash from the repository source. -2. Create or edit _gradle.properties_ and add the LOGSTASH_CORE_PATH variable with the path to the logstash-core folder. For example: - - ```LOGSTASH_CORE_PATH=/Users/taldan/logstash76/logstash-core``` - -3. Run ```$ ./gradlew.unix gem --info``` to create the GEM. - - **Note**: Ensure that JRuby is already installed. 
- -## Install -To install this plug-in on your local developer machine with Logstash installed, issue this command: - - $ ~/Downloads/logstash-7.5.2/bin/logstash-plugin install ./logstash-filter-mongodb_guardium_filter-?.?.?.gem - -Notes: -* Replace "?" with this plug-in version. -* The logstash-plugin may not handle relative paths well. It is recommended that you install the GEM from a simple path, as in the above example. - -To test the filter using your local Logstash installation, run this command: - - $ logstash -f ./filter-test-generator.conf --config.reload.automatic - ## Configuring audit logs on MongoDB and forwarding to Guardium via Filebeat First, configure the MongoDB native audit logs so that they can be parsed by Guardium. Then, configure Filebeat to forward the audit logs to the Guardium universal connector. This implementation supports Linux and Windows database servers. @@ -155,7 +115,7 @@ First, configure the MongoDB native audit logs so that they can be parsed by Gua - Filebeat must be installed on your database server. For more information on installation, see [https://www.elastic.co/guide/en/beats/filebeat/current/setup-repositories.html\#\_yum](https://www.elastic.co/guide/en/beats/filebeat/current/setup-repositories.html#_yum). The recommended Filebeat version is 7.5.0 and higher. - Native audit configuration is performed by the database admin. - Filebeat cannot handle messages over approximately 1 GB. Make sure the MongoDB does not save files larger than this limit \(by using `logRotate`\). File messages that exceed the limit are dropped. -- You can configure multiple collectors simultaneously by using GIM \([Configuring the GIM client to handle Filebeat and Syslog on MongoDB](https://github.com/IBM/universal-connectors/blob/main/docs/general%20topics/GIM.md). If you configure collectors manually, you need to configure them individually. +- You can configure multiple collectors simultaneously by using GIM (see [Configuring the GIM client to handle Filebeat and Syslog on MongoDB](https://github.com/IBM/universal-connectors/blob/main/docs/general%20topics/GIM.md)). If you configure collectors manually, you need to configure them individually. - For more information about MongoDB native audit, see [https://docs.mongodb.com/manual/core/auditing/](https://docs.mongodb.com/manual/core/auditing/). ### Procedure @@ -164,62 +124,56 @@ First, configure the MongoDB native audit logs so that they can be parsed by Gua a. Configure the AuditLog section in the mongod.conf file. - - `destination`: file - - `format`: JSON - - `path`: /var/log/mongodb/.json, for example /var/log/mongodb/auditLog.json + - destination: file + - format: JSON + - path: /var/log/mongodb/.json, for example /var/log/mongodb/auditLog.json b. Add the following field to audit the `auditAuthorizationSuccess` messages: - - - setParameter: {auditAuthorizationSuccess: **true**} - +``` + setParameter: {auditAuthorizationSuccess: true} +``` c. Add or uncomment the security section and edit the following parameter: - - authorization: **enabled** + ``` + authorization: enabled + ``` d. `filter`: For the Guardium universal connector MongoDB filter to handle events properly, a few conditions must exist: - - MongoDB access control must be set. \(Messages without users are removed.\) - - - `authCheck` and `authenticate events` are not filtered out from the MongoDB audit log messages.
Verify that the filter section contains at least the following commands: - - - - '{ atype: { $in: ["authCheck", "authenticate"] }' - - - To narrow down the events, you can tweak the filter. - - - To audit only the delete actions made in MongoDB, for example, add the following suffix to the filter section: - - - '{ atype: { $in: ["authCheck", "authenticate"] } ' - "param.command": { $in: [" - delete"] } }' - - - - Auditing all commands can lead to excessive records. To prevent performance issues, make sure you have `authCheck` and `authenticate` log types, and any other commands you want to see. The filter parameters are an allowed list. They define what you see in the logs, not what is filtered from the logs. For more information about the MongoDB filter, see [Configuring Audit Filters](https://docs.mongodb.com/manual/tutorial/configure-audit-filters/) and [Configuring Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation-configuration.html#installation). - - **Note:** The spaces in the configuration file are important, and must be located in the file as presented here. - - After configuration, the file has these lines: - - - ... - auditLog: - destination: file - format: JSON - path: /var/lib/mongo/auditLog.json - filter: '{ atype: { $in: ["authCheck", "authenticate"] } , "param.command": { $in: ["delete"] } }' - setParameter: {auditAuthorizationSuccess: true} - ... - security: - authorization: enabled - + * MongoDB access control must be set. (Messages without users are removed.) + * `authCheck` and `authenticate` events are not filtered out from the MongoDB audit log messages. + Verify that the filter section contains at least the following commands: + ``` + '{ atype: { $in: ["authCheck", "authenticate"] }' + ``` + To narrow down the events, you can tweak the filter. + For example, to audit only the delete actions made in MongoDB, add the following suffix to the filter section: + ``` + '{ atype: { $in: ["authCheck", "authenticate"] } ' + "param.command": { $in: [" + delete"] } }' + ``` + * Auditing all commands can lead to excessive records. To prevent performance issues, make sure you have `authCheck` and `authenticate` log types, and any other commands you want to see. The filter parameters are an allowed list. They define what you see in the logs, not what is filtered from the logs. For more information about the MongoDB filter, see [Configuring Audit Filters](https://docs.mongodb.com/manual/tutorial/configure-audit-filters/) and [Configuring Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation-configuration.html#installation). + + **Note:** The spaces in the configuration file are important, and must be located in the file as presented here. + + e. After configuration, the file has these lines: +``` + ... + auditLog: + destination: file + format: JSON + path: /var/lib/mongo/auditLog.json + filter: '{"$or": [{ atype: { $ne: ["authCheck"] }, "param.command": { $in: [ "find", "insert", "delete", "update", "findandmodify", "create", "drop", "mapReduce", "applyOps", "eval", "resetError","renameCollection","adminCommand"] } },{ atype: "authCheck", "param.command": { $in: ["aggregate"]}}]}' + setParameter: {auditAuthorizationSuccess: true} + ... + security: + authorization: enabled +``` **Important:** MongoDB must be restarted for the configuration changes to take effect. + + 2. Configure the Filebeat data shipper to forward the audit logs to the Guardium universal connector.
In the file filebeat.yml, usually located in /etc/filebeat/filebeat.yml, modify the Filebeat inputs section. a. Select a template from the Universal Connector page and enter your desired port in the port line, beginning at port 5001. \(Use a new port for each new future connection.\) Save the configuration. @@ -229,17 +183,17 @@ First, configure the MongoDB native audit logs so that they can be parsed by Gua filebeat.inputs - type: log - enabled: **true** + enabled: true paths: - - **/var/log/mongodb/auditLog.json** + - /var/log/mongodb/auditLog.json #- c:\programdata\elasticsearch\logs\* tags: ["mongodb"] c. If you send multiple, different data sources from the same server on the same port: -- Attach a different tag to each input log. Then, use the tags when you configure the connector -- Use the ```tags``` parameter from the following code while configuring the connector: + - Attach a different tag to each input log. Then, use the tags when you configure the connector. + - Use the ```tags``` parameter from the following code while configuring the connector: # ============================== Filebeat inputs =============================== filebeat.inputs: # Each - is an input. Most options can be set at the input level, so # you can use different inputs for various configurations. # Below are the input specific configurations. - type: log # Change to true to enable this input configuration. enabled: true # Paths that should be crawled and fetched. Glob based paths. - paths:-/var/lib/mongo/auditLog.json + paths: ["/var/lib/mongo/auditLog.json"] tags: ["mongodb"] d. In the Outputs section: -- Make sure that Elasticsearch output is commented out. - - Add or uncomment the Logstash output and edit the following parameters: - - Add all the Guardium Universal Connector IPs and ports: + - Make sure that Elasticsearch output is commented out. + - Add or uncomment the Logstash output and edit the following parameters: + - Add all the Guardium Universal Connector IPs and ports: - - hosts: **hosts: \[“:”,”:,”:”...\]** + hosts: ["<host>:<port>", "<host>:<port>", ...] - - Use the same port you selected when configuring the Universal Connector. - - Enable load balancing: + - Use the same port you selected when configuring the Universal Connector.
+ - Enable load balancing: - loadbalance: **true** + loadbalance: true - For more information on Elastic's Filebeat load-balancing, see: [https://www.elastic.co/guide/en/beats/filebeat/current/load-balancing.html](https://www.elastic.co/guide/en/beats/filebeat/7.17/load-balancing.html) - More optional parameters are described in the Elastic official documentation: [https://www.elastic.co/guide/en/beats/filebeat/current/logstash-output.html](https://www.elastic.co/guide/en/beats/filebeat/current/logstash-output.html) - A typical original log file looks like: - - ``` - { "atype" : "authCheck", "ts" : { "$date" : "2020-02-16T03:21:58.185-0500" }, "local" : { "ip" : "127.0.30.1", "port" : 0 }, "remote" : { "ip" : "127.0.20.1", "port" : 0 }, "users" : [], "roles" : [], "param" : { "command" : "find", "ns" : "config.transactions", "args" : { "find" : "transactions", "filter" : { "lastWriteDate" : { "$lt" : { "$date" : "2020-02-16T02:51:58.185-0500" } } }, "projection" : { "_id" : 1 }, "sort" : { "_id" : 1 }, "$db" : "config" } }, "result" : 0 } - ``` - - The Filebeat version of the same file looks like: - - - { - "@version" => "1", - "input" => { "type" => "log"}, - "tags" => [[0] "beats_input_codec_plain_applied"], - "@timestamp" => 2020-06-11T13:46:20.663Z, - "log" => {"offset" => 1997890,"file" => { "path" =>"C:\\Users\\Name\\Desktop\\p1.log" }}, - "ecs" => {"version" => "1.4.0"}, - "type" => "filebeat", - "agent" => { - "ephemeral_id" => - "b7d849f9-dfa9-4d27-be8c-20061b1facdf", - "id" => - "a54b2184-0bb5-4683-a039-7e1c70f1a57c", - "version" => "7.6.2", - "type" => "filebeat", - "hostname" => "" - }, - "message" =>"{ \"atype\" : \"authCheck\", \"ts\" : { \"$date\" : \"2020-02-16T03:21:58.185-0500\" }, \"local\" : { \"ip\" : \"127.0.30.1\", \"port\" : 0 }, \"remote\" : { \"ip\" : \"127.0.20.1\", \"port\" : 0 }, \"users\" : [], \"roles\" : [], \"param\" : { \"command\" : \"find\", \"ns\" : \"config.transactions\", \"args\" : { \"find\" : \"transactions\", \"filter\" : { \"lastWriteDate\" : { \"$lt\" : { \"$date\" : \"2020-02-16T02:51:58.185-0500\" } } }, \"projection\" : { \"_id\" : 1 }, \"sort\" : { \"_id\" : 1 }, \"$db\" : \"config\" } }, \"result\" : 0 }", - "host" => { - "architecture" => - "x86_64", - "id" => "d4e2c297-47bf-443a-8af8-e921715ed047", - "os" => { - "version" => "10.0", - "kernel" => "10.0.18362.836 (WinBuild.160101.0800)", - "build" => "18363.836", - "name" => "Windows 10 Enterprise", - "platform" => "windows", - "family" => "windows" - }, - "name" => "", - "hostname" => "" - } - } - 3. Restart Filebeat to effect these changes. @@ -333,9 +242,130 @@ First, configure the MongoDB native audit logs so that they can be parsed by Gua #### For details on configuring Filebeat connection over SSL, refer [Configuring Filebeat to push logs to Guardium](https://github.com/IBM/universal-connectors/blob/main/input-plugin/logstash-input-beats/README.md#configuring-filebeat-to-push-logs-to-guardium). -### What to do next +## Configuring Syslog to push logs to Guardium + +### Syslogs configuration: +To make the Logstash able to process the data collected by syslogs, configure available +syslog utility. The example is based on rsyslog utility available in many +versions of the Linux distributions. To check the service is active and running, execute the below +command: + +```text +systemctl status rsyslog +``` -Enable the universal connector on your collector. 
[Enabling the Guardium universal connector on collectors](https://www.ibm.com/docs/en/SSMPHH_11.4.0/com.ibm.guardium.doc.stap/guc/cfg_guc_input_filters.html) +#### Rsyslog installation guide: +* [Ubuntu](https://www.rsyslog.com/ubuntu-repository) +* [RHEL](https://www.rsyslog.com/rhelcentos-rpms) + +1. Generate a Certificate Authority (CA): + * **Guardium Data Protection**
+ To obtain the Certificate Authority content on the Collector, run the following API command: + ```text + grdapi generate_ssl_key_universal_connector + ``` + This API command displays the content of the public Certificate Authority. Copy this certificate authority content to your database source and save it as a file named 'ca.pem'. + + * **Guardium Data Security Center - SaaS**
+ Refer to the instructions provided [here](https://www.ibm.com/docs/en/gdsc/saas?topic=connector-connecting-data-source-by-using-universal#plugin_connection_configuration__title__15) to obtain the Certificate Authority + and connection details for Guardium Insights-SaaS. +2. Create a file named `mongo_syslog.conf` in the /etc/rsyslog.d/ directory with the content from the +snippet below, and change the values of target and port: + ```text + global(DefaultNetstreamDriverCAFile="/path/to/ca_file/ca.pem") + # The template for message formatting + $template UcMessageFormat,"%HOSTNAME%,,%msg%" + + module(load="imfile") + ruleset(name="imfile_to_gdp") { + action(type="omfwd" + protocol="tcp" + StreamDriver="gtls" + StreamDriverMode="1" + StreamDriverAuthMode="x509/certvalid" + template="UcMessageFormat" + target="" + port="") + } + + input( + type="imfile" + file="/path/to/logs/directory/auditLog.json" + # Keep the value of the tag below the same as shown here, + tag="syslog" + ruleset="imfile_to_gdp" + ) + ``` + This configuration reads the logs from the MongoDB log directory path and sends +the syslog messages to the provided host (target_host) at the provided port (target_port). You can validate the file's syntax before restarting rsyslog, as shown after the note below.

+ +   **NOTE**: For further configuration requirements that are specific to the Guardium Insights - SaaS +environment, follow the instructions provided [here](https://github.com/IBM/universal-connectors/blob/main/docs/Guardium%20Insights/SaaS_1.0/UC_Configuration_GI.md#tcp-input-plug-in-configuration-for-connection-with-syslog). +
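+   Before restarting rsyslog in the next step, you can check that the new file parses cleanly. This is a minimal validation sketch, assuming the `rsyslogd` binary is on the PATH (the `-N1` flag runs rsyslog in configuration-checking mode and exits without starting the daemon): + ```text + rsyslogd -N1 + ``` + If no errors are reported, continue with steps 3 and 4 below.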

+ +3. Include this file in the main rsyslog configuration file. + 1. Open the file `/etc/rsyslog.conf`. + 2. Append the following line at the end. + ```text + $IncludeConfig /etc/rsyslog.d/mongo_syslog.conf + ``` +4. Restart the rsyslog utility. + ```text + systemctl restart rsyslog + ``` + +## Configuring the MongoDB filters in Guardium +The Guardium universal connector is the Guardium entry point for native audit logs. The universal connector identifies and parses received events, and then converts them to a standard Guardium format. The output of the universal connector is forwarded to the Guardium sniffer on the collector, for policy and auditing enforcements. Configure Guardium to read the native audit logs by customizing the MongoDB template. + +**Important** + +• Starting with Guardium Data Protection version 12.1, you can configure the Universal Connector in two ways. You can either use the legacy flow or the new flow. + +• To configure the Universal Connector by using the new flow, see [Managing universal connector configuration](https://www.ibm.com/docs/en/gdp/12.x?topic=connector-managing-universal-configuration) on the Guardium Universal Connector page. + +• To configure the Universal Connector by using the legacy flow, use the procedure in this topic. + +### Limitations +* The filter supports events sent through Syslog or Filebeat. It relies on the "mongod:" or "mongos:" prefixes in +the event message for the JSON portion of the audit log to be parsed. +* Field **server_hostname** (required) - Server hostname is expected (extracted from the nested field "name" +inside the host object of the Filebeat message). +* Field **server_ip** - States the IP address of the MongoDB server, if it is available to the +filter plug-in. The filter will use this IP address instead of localhost IP addresses +that are reported by MongoDB, if actions were performed directly on the database server. +* The client "Source program" is not available in messages sent by MongoDB. This is because +this data is sent only in the first audit log message upon database connection, and the +filter plug-in doesn't aggregate data from different messages. + + +### Before You Begin +* Configure the policies you require. See [policies](https://github.com/IBM/universal-connectors/blob/main/docs/Guardium%20Data%20Protection/uc_policies_gdp.md) for more information. +* You must have permission for the S-Tap Management role. The admin user includes this role by default. + + +### Configuration +1. On the collector, go to ```Setup``` > ```Tools and Views``` > ```Configure Universal Connector```. +2. Enable the universal connector if it is disabled. +3. Click the plus sign to open the Connector Configuration dialog box. +4. Type a name in the ```Connector name``` field. +5. Update the input section: + 1. To collect data over Filebeat, add the details from the [mongoDBFilebeat.conf](./MongodbOverFilebeatPackage/mongodbFilebeat.conf) + file input section, omitting the keyword "input{" at the beginning and its corresponding "}" + at the end. + 2. To collect data over Syslog, add the details from the [mongoDBSyslog.conf](./MongoDBOverSyslogPackage/mongodbSyslog.conf) file input section, + omitting the keyword "input{" at the beginning and its corresponding "}" at the end. + 3. To collect data over the Mongo Atlas API, add the details from the [mongoAtlas.conf](./MongodbOverMongoAtlasPackage/mongodbAtlas.conf) file input section, + omitting the keyword "input{" at the beginning and its corresponding "}" at the end. +6. Update the filter section: + 1.
To filter the data collected from Filebeat, add the details from the + [mongoDBFilebeat.conf](./MongodbOverFilebeatPackage/mongodbFilebeat.conf) file filter section, omitting the keyword + "filter{" at the beginning and its corresponding "}" at the end. + 2. To filter the data collected from Syslog, add the details from the + [mongoDBSyslog.conf](MongoDBOverSyslogPackage/mongodbSyslog.conf) file filter section, + omitting the keyword "filter{" at the beginning and its corresponding "}" at the end. + 3. To filter the data collected from the Mongo Atlas API, add the details from the [mongoAtlas.conf](./MongodbOverMongoAtlasPackage/mongodbAtlas.conf) file filter section, omitting the keyword "filter{" at the beginning and its corresponding "}" at the end. +7. The "type" fields should match in the input and the filter configuration sections. This field should be unique for every individual connector added. This is no longer required starting with v12p20 and v12.1. +8. Click ```Save```. Guardium validates the new connector, and enables the universal connector if it was disabled. After it is validated, it appears in the Configure Universal Connector page. ## Configuring the MongoDB filters in Guardium Data Security Center diff --git a/filter-plugin/logstash-filter-opensearch-guardium/CHANGELOG.md b/filter-plugin/logstash-filter-opensearch-guardium/CHANGELOG.md new file mode 100644 index 000000000..143dd1602 --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/CHANGELOG.md @@ -0,0 +1,10 @@ + +# Changelog +Notable changes will be documented in this file. + + + +## [] + +### Added +- Initial release, in parallel to Guardium . diff --git a/filter-plugin/logstash-filter-opensearch-guardium/LICENSE b/filter-plugin/logstash-filter-opensearch-guardium/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/filter-plugin/logstash-filter-opensearch-guardium/OpenSearchOverCloudwatchPackage/opensearch.conf b/filter-plugin/logstash-filter-opensearch-guardium/OpenSearchOverCloudwatchPackage/opensearch.conf new file mode 100644 index 000000000..d6b3fcef9 --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/OpenSearchOverCloudwatchPackage/opensearch.conf @@ -0,0 +1,20 @@ +input{ + cloudwatch { + log_group => [""] #example: ["/aws/OpenSearchService/<domain-name>/audit"] + region => "" #Region that has the domain, example: ap-south-1 + codec => plain + sincedb_path => "NUL" + access_key_id => "" + secret_access_key => "" + type => "opensearch" + event_filter => '' + start_position => "end" + add_field => {"account_id" => ""} + } +} +filter { + if ([type] == "opensearch"){ + opensearch_guardium_filter{} + } +} + diff --git a/filter-plugin/logstash-filter-opensearch-guardium/README.md b/filter-plugin/logstash-filter-opensearch-guardium/README.md new file mode 100644 index 000000000..e4840b1b5 --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/README.md @@ -0,0 +1,96 @@ +# Amazon OpenSearch - Guardium Logstash filter plug-in + +### Meet OpenSearch + +* Tested versions: v1 +* Environment: AWS +* Supported inputs: CloudWatch (pull) +* Supported Guardium versions: + * Guardium Data Protection 12.2 and later + +This is a [Logstash](https://github.com/elastic/logstash) filter plug-in for the universal connector that is featured in +IBM Security Guardium. It parses events and messages from the Amazon OpenSearch audit log into +a Guardium Record. + +The plug-in is free and open-source (Apache 2.0). It can be used as a starting point to develop additional filter +plug-ins for the Guardium universal connector. + +## Configuration + +### OpenSearch Setup + +1. [Prerequisites](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/setting-up.html) +2. Go to https://console.aws.amazon.com/. +3. Search and navigate to ```Amazon OpenSearch Service```. +4. To create an OpenSearch domain, refer to the [Getting started with Amazon OpenSearch Service guide](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/gsg.html). + +### Enabling Audit Logs + +1. To enable audit logs for **CloudWatch Logs** and the **OpenSearch Dashboard**, refer to [Enabling Audit logs](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/audit-logs.html#audit-log-enabling). + +### Viewing Audit Logs on CloudWatch + +By default, each domain has an associated log group with a name in this format: `/aws/OpenSearchService//audit` and `/aws/OpenSearchService//profiler`. + +#### Procedure + +1. Open the CloudWatch console at https://console.aws.amazon.com/cloudwatch/. +2. In the navigation pane, choose ```Log groups```. +3. Choose the ```log group``` that you specified while enabling audit logs. Within the log group, OpenSearch Service creates a log stream for each node in your domain. +4. In the ```Log streams```, select ```Search all```. +5. For the read and write events, see the corresponding logs. This process may take several seconds. + +#### Supported Audit Log Types + +Cluster communication occurs over two separate layers: **REST layer** and **Transport layer**. The following is the list of audit log categories, with their availability determined by the communication layers.
+* FAILED_LOGIN +* MISSING_PRIVILEGES +* BAD_HEADERS +* SSL_EXCEPTION +* GRANTED_PRIVILEGES +* OPENSEARCH_SECURITY_INDEX_ATTEMPT +* AUTHENTICATED +* INDEX_EVENT +* COMPLIANCE_DOC_READ +* COMPLIANCE_DOC_WRITE +* COMPLIANCE_INTERNAL_CONFIG_READ +* COMPLIANCE_INTERNAL_CONFIG_WRITE + + + +For more information about the audit logging categories and layers, refer to the [Audit log layers and categories](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/audit-logs.html#audit-log-layers). + +For more information about the audit logging fields, refer to the [Audit log field reference](https://docs.opensearch.org/docs/latest/security/audit-logs/field-reference/). + +**Note:** OpenSearch generates a large volume of background audit logs by default. We recommend configuring the audit settings appropriately to limit unnecessary entries in the audit logs. + +### Limitations +- Audit logging in OpenSearch can be accessed in two different ways: via the OpenSearch Dashboards or through CloudWatch Logs. However, this filter plug-in only parses and processes audit logs that are streamed to CloudWatch. Audit logs stored directly in OpenSearch indices or viewed in the Dashboards are not supported for parsing. +- OpenSearch may log FAILED_LOGIN REST messages during idle periods in Dev Tools due to background requests (like session keep-alives or auth checks) failing authentication, even if no user-initiated requests are made. +- Certain reserved keywords (template, mappings, get, aliases, user) are automatically prefixed with an underscore (_) during sanitization to prevent OpenSearch URI parsing errors or endpoint conflicts. +- Based on the enabled audit options, multiple entries may appear on the S-TAP page when the server is accessed through different ports. + +## Guardium Data Protection + +The Guardium universal connector is the Guardium entry point for native audit/data_access logs. The Guardium universal connector identifies and parses the received events, and converts them to a standard Guardium format. The output of the Guardium universal connector is forwarded to the Guardium sniffer on the collector, for policy and auditing enforcements. + +### Before you begin +* Configure the policies you require. See [policies](/docs/#policies) for more information. +* You must have permission for the S-Tap Management role. The admin user includes this role by default. +* Download the [logstash-filter-aws_opensearch_guardium_filter](logstash-filter-opensearch_guardium_filter.zip) plug-in. + +### Procedure +1. On the collector, go to ```Setup``` > ```Tools and Views``` > ```Configure Universal Connector```. +2. Enable the universal connector if it is disabled. +3. Click ```Upload File``` and select the offline [logstash-filter-aws_opensearch_guardium_filter](logstash-filter-opensearch_guardium_filter.zip) plug-in. After it is uploaded, click ```OK```. +4. Click ```Upload File``` and select the key.json file. After it is uploaded, click ```OK```. +5. Click the Plus sign to open the Connector Configuration dialog box. +6. Type a name in the Connector name field. +7. Update the input section to add the details from the [opensearch.conf](OpenSearchOverCloudwatchPackage/opensearch.conf) file's input part, omitting the keyword "input{" at the beginning and its corresponding "}" at the end. +8.
+9. The 'type' fields should match in the input and filter configuration sections. This field should be unique for every individual connector added.
+10. Click ```Save```. Guardium validates the new connector and displays it in the Configure Universal Connector page.
+11. After the offline plug-in is installed and the configuration is uploaded and saved in the Guardium machine, restart the Universal Connector using the ```Disable/Enable``` button.
+
diff --git a/filter-plugin/logstash-filter-opensearch-guardium/VERSION b/filter-plugin/logstash-filter-opensearch-guardium/VERSION new file mode 100644 index 000000000..afaf360d3 --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/VERSION @@ -0,0 +1 @@ +1.0.0 \ No newline at end of file
diff --git a/filter-plugin/logstash-filter-opensearch-guardium/build.gradle b/filter-plugin/logstash-filter-opensearch-guardium/build.gradle new file mode 100644 index 000000000..e9d7aa237 --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/build.gradle @@ -0,0 +1,200 @@
+import java.nio.file.Files
+import static java.nio.file.StandardCopyOption.REPLACE_EXISTING
+
+apply plugin: 'java'
+apply plugin: 'jacoco'
+apply from: LOGSTASH_CORE_PATH + "/../rubyUtils.gradle"
+apply plugin: "eclipse"
+
+
+// ===========================================================================
+// plugin info
+// ===========================================================================
+group "com.ibm.guardium.aws.opensearch" // must match the package of the main plugin class
+version "${file("VERSION").text.trim()}" // read from required VERSION file
+description = "AWS OpenSearch Guardium Filter Plugin"
+pluginInfo.licenses = ['Apache-2.0'] // list of SPDX license IDs
+pluginInfo.longDescription = "This gem is a Logstash OpenSearch filter plugin required to be installed as part of IBM Security Guardium, Guardium Universal connector configuration. This gem is not a stand-alone program."
+pluginInfo.authors = ['IBM', '', ''] +pluginInfo.email = [''] +pluginInfo.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html" +pluginInfo.pluginType = "filter" +pluginInfo.pluginClass = "OpensearchGuardiumFilter" +pluginInfo.pluginName = "opensearch_guardium_filter" // must match the @LogstashPlugin annotation in the main plugin class +// =========================================================================== + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 + +def jacocoVersion = '0.8.4' +// minimumCoverage can be set by Travis ENV +def minimumCoverageStr = System.getenv("MINIMUM_COVERAGE") ?: "50.0%" +if (minimumCoverageStr.endsWith("%")) { + minimumCoverageStr = minimumCoverageStr.substring(0, minimumCoverageStr.length() - 1) +} +def minimumCoverage = Float.valueOf(minimumCoverageStr) / 100 + +buildscript { + repositories { + maven { + url "https://plugins.gradle.org/m2/" + } + mavenCentral() + jcenter() + } + + dependencies { + classpath 'com.github.jengelman.gradle.plugins:shadow:4.0.4' + classpath "org.barfuin.gradle.jacocolog:gradle-jacoco-log:3.0.0-RC2" + classpath group: 'org.yaml', name: 'snakeyaml', version: '2.2' + } + +} + +def universalConnectorsDir = project.projectDir.parentFile?.parentFile.toString(); +def versions = new org.yaml.snakeyaml.Yaml().load(new File("${universalConnectorsDir}/versions.yml").newInputStream()) + + +repositories { + mavenCentral() +} + +tasks.register("vendor") { + dependsOn shadowJar + doLast { + String vendorPathPrefix = "vendor/jar-dependencies" + String projectGroupPath = project.group.replaceAll('\\.', '/') + File projectJarFile = file("${vendorPathPrefix}/${projectGroupPath}/${pluginInfo.pluginFullName()}/${project.version}/${pluginInfo.pluginFullName()}-${project.version}.jar") + projectJarFile.mkdirs() + Files.copy(file("$buildDir/libs/${project.name}-${project.version}.jar").toPath(), projectJarFile.toPath(), REPLACE_EXISTING) + validatePluginJar(projectJarFile, project.group) + } +} + +apply plugin: 'com.github.johnrengelman.shadow' + +shadowJar { + classifier = null +} + + +dependencies { + implementation 'com.google.code.gson:gson:' + versions.dependencies.gson + implementation 'org.apache.commons:commons-lang3:' + versions.dependencies.commonsLang + implementation 'commons-validator:commons-validator:' + versions.dependencies.commonsValidator + implementation group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.17.2' + + testImplementation group: 'org.mockito', name: 'mockito-all', version: versions.dependencies.mockitoAll + testImplementation 'org.junit.jupiter:junit-jupiter:' + versions.dependencies.junitJupiter + testImplementation 'org.jruby:jruby-complete:' + versions.dependencies.jrubyComplete + implementation fileTree(dir: GUARDIUM_UNIVERSALCONNECTOR_COMMONS_PATH, include: "common-*.*.*.jar") + implementation fileTree(dir: LOGSTASH_CORE_PATH, include: "build/libs/logstash-core-*.*.*.jar") + implementation group: 'org.apache.logging.log4j', name: 'log4j-core', version: versions.dependencies.log4jCore + implementation group: 'org.json', name: 'json', version: versions.dependencies.json + implementation group: 'org.parboiled', name: 'parboiled-java', version: versions.dependencies.parboiledJava + implementation group: 'org.glassfish', name: 'javax.json', version: versions.dependencies.javaxJson + testImplementation fileTree(dir: GUARDIUM_UNIVERSALCONNECTOR_COMMONS_PATH, include: "common-*.*.*.jar") + testImplementation 'junit:junit:4.13.1' +} + +clean { + delete 
"${projectDir}/Gemfile" + delete "${projectDir}/" + pluginInfo.pluginFullName() + ".gemspec" + delete "${projectDir}/lib/" + delete "${projectDir}/vendor/" + new FileNameFinder().getFileNames(projectDir.toString(), pluginInfo.pluginFullName() + "-*.*.*.gem").each { filename -> + delete filename + } +} + +tasks.withType(JavaCompile) { + options.encoding = 'UTF-8' +} +test { + useJUnitPlatform() +} +tasks.register("generateRubySupportFiles") { + doLast { + generateRubySupportFilesForPlugin(project.description, project.group, version) + } +} + +tasks.register("removeObsoleteJars") { + doLast { + new FileNameFinder().getFileNames( + projectDir.toString(), + "vendor/**/" + pluginInfo.pluginFullName() + "*.jar", + "vendor/**/" + pluginInfo.pluginFullName() + "-" + version + ".jar").each { f -> + delete f + } + } +} + +tasks.register("gem") { + dependsOn = [downloadAndInstallJRuby, removeObsoleteJars, vendor, generateRubySupportFiles] + doLast { + buildGem(projectDir, buildDir, pluginInfo.pluginFullName() + ".gemspec") + } +} + +tasks.register("copyDependencyLibs", Copy) { + into "dependenciesLib" + from configurations.compileClasspath + from configurations.runtimeClasspath + from configurations.testCompileClasspath + from configurations.testRuntimeClasspath +} + +apply plugin: 'jacoco' +//apply plugin: 'org.barfuin.gradle.jacocolog' version '2.0.0' +apply plugin: "org.barfuin.gradle.jacocolog" +// ------------------------------------ +// JaCoCo is a code coverage tool +// ------------------------------------ +jacoco { + toolVersion = "${jacocoVersion}" + reportsDir = file("$buildDir/reports/jacoco") +} +jacocoTestReport { + // You will see "Report -> file://...." at the end of a JaCoCo build + // If no output, run this first: ./gradlew test + reports { + html.enabled true + xml.enabled true + csv.enabled true + html.destination file("${buildDir}/reports/jacoco") + csv.destination file("${buildDir}/reports/jacoco/all.csv") + } + executionData.from fileTree(dir: "${buildDir}/jacoco/", includes: [ + '**/*.exec' + ]) + afterEvaluate { + // objective is to test TicketingService class + classDirectories.setFrom(files(classDirectories.files.collect { + fileTree(dir: it, exclude: []) + })) + } + doLast { + println "Report -> file://${buildDir}/reports/jacoco/index.html" + } +} +test.finalizedBy jacocoTestReport +jacocoTestCoverageVerification { + violationRules { + rule { + limit { + minimum = minimumCoverage + } + } + } + executionData.from fileTree(dir: "${buildDir}/jacoco/", includes: [ + '**/*.exec' + ]) + afterEvaluate { + // objective is to test TicketingService class + classDirectories.setFrom(files(classDirectories.files.collect { + fileTree(dir: it, exclude: []) + })) + } +} +project.tasks.check.dependsOn(jacocoTestCoverageVerification, jacocoTestReport) \ No newline at end of file diff --git a/filter-plugin/logstash-filter-opensearch-guardium/gradle/wrapper/gradle-wrapper.jar b/filter-plugin/logstash-filter-opensearch-guardium/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 000000000..7454180f2 Binary files /dev/null and b/filter-plugin/logstash-filter-opensearch-guardium/gradle/wrapper/gradle-wrapper.jar differ diff --git a/filter-plugin/logstash-filter-opensearch-guardium/gradle/wrapper/gradle-wrapper.properties b/filter-plugin/logstash-filter-opensearch-guardium/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 000000000..aa991fcea --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ 
+distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.4.2-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/filter-plugin/logstash-filter-opensearch-guardium/gradlew b/filter-plugin/logstash-filter-opensearch-guardium/gradlew new file mode 100755 index 000000000..744e882ed --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/gradlew @@ -0,0 +1,185 @@ +#!/usr/bin/env sh + +# +# Copyright 2015 the original author or authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MSYS* | MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin or MSYS, switch paths to Windows format before running java +if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=`expr $i + 1` + done + case $i in + 0) set -- ;; + 1) set -- "$args0" ;; + 2) set -- "$args0" "$args1" ;; + 3) set -- "$args0" "$args1" "$args2" ;; + 4) set -- "$args0" "$args1" "$args2" "$args3" ;; + 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=`save "$@"` + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +exec "$JAVACMD" "$@" diff --git a/filter-plugin/logstash-filter-opensearch-guardium/gradlew.bat b/filter-plugin/logstash-filter-opensearch-guardium/gradlew.bat new file mode 100644 index 000000000..107acd32c --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/filter-plugin/logstash-filter-opensearch-guardium/logstash-filter-opensearch_guardium_filter.zip b/filter-plugin/logstash-filter-opensearch-guardium/logstash-filter-opensearch_guardium_filter.zip new file mode 100644 index 000000000..ae14d4663 Binary files /dev/null and b/filter-plugin/logstash-filter-opensearch-guardium/logstash-filter-opensearch_guardium_filter.zip differ diff --git a/filter-plugin/logstash-filter-opensearch-guardium/settings.gradle b/filter-plugin/logstash-filter-opensearch-guardium/settings.gradle new file mode 100644 index 000000000..c65c1ca4e --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/settings.gradle @@ -0,0 +1,11 @@ +/* + * This file was generated by the Gradle 'init' task. + * + * The settings file is used to specify which projects to include in your build. 
+ *
+ * Detailed information about configuring a multi-project build in Gradle can be found
+ * in the user manual at https://docs.gradle.org/7.1.1/userguide/multi_project_builds.html
+ */
+
+rootProject.name = 'logstash-filter-opensearch-guardium'
+include('lib')
diff --git a/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/CommonUtils.java b/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/CommonUtils.java new file mode 100644 index 000000000..77a7e171f --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/CommonUtils.java @@ -0,0 +1,34 @@
+/*
+Copyright IBM Corp. 2021, 2025 All rights reserved.
+SPDX-License-Identifier: Apache-2.0
+ */
+
+package com.ibm.guardium.aws.opensearch;
+
+import org.json.JSONException;
+import org.json.JSONObject;
+
+public class CommonUtils {
+
+    public CommonUtils() {
+        super();
+    }
+
+    /**
+     * Checks whether the input string is valid JSON.
+     *
+     * @param value the string to validate
+     * @return true if the string parses as a JSON object, false otherwise
+     */
+    public static boolean isJSONValid(String value) {
+        try {
+            new JSONObject(value);
+        } catch (JSONException ex) {
+            return false;
+        }
+        return true;
+    }
+}
diff --git a/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/ConfigFileContent.java b/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/ConfigFileContent.java new file mode 100644 index 000000000..c7abfdad2 --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/ConfigFileContent.java @@ -0,0 +1,52 @@
+package com.ibm.guardium.aws.opensearch;
+
+public class ConfigFileContent {
+
+    public String getConfigFileContent() {
+        StringBuilder config = new StringBuilder("{\n");
+
+        // Common fields across all categories
+        config.append(" \"app_user_name\": \"audit_request_effective_user\",\n");
+        config.append(" \"client_hostname\": \"audit_node_host_name\",\n");
+        config.append(" \"client_ip\": \"audit_request_remote_address\",\n");
+        config.append(" \"client_ipv6\": \"\",\n");
+        config.append(" \"client_mac\": \"\",\n");
+        config.append(" \"client_os\": \"\",\n");
+        config.append(" \"client_port\": \"{-1}\",\n");
+        config.append(" \"comm_protocol\": \"audit_request_layer\",\n");
+        config.append(" \"construct\": \"audit_category\",\n");
+        config.append(" \"db_name\": \"audit_cluster_name\",\n");
+        config.append(" \"db_protocol\": \"{OPSEARCH}\",\n");
+        config.append(" \"db_user\": \"audit_request_effective_user\",\n");
+        config.append(" \"db_user_initiating_user\": \"audit_request_initiating_user\",\n");
+        config.append(" \"server_hostname\": \"audit_node_name\",\n");
+        config.append(" \"server_ip\": \"\",\n");
+        config.append(" \"server_port\": \"{-1}\",\n");
+        config.append(" \"server_type\": \"{Opensearch}\",\n");
+        config.append(" \"service_name\": \"audit_cluster_name\",\n");
+        config.append(" \"session_id\": \"audit_node_id\",\n");
+        config.append(" \"source_program\": \"audit_request_origin\",\n");
+        config.append(" \"sql_parsing_active\": \"true\",\n");
+        config.append(" \"timestamp\": \"@timestamp\",\n");
+
+        // REST
+        config.append(" \"REST_PATH\": \"audit_rest_request_path\",\n");
+        config.append(" \"REST_METHOD\": \"audit_rest_request_method\",\n");
+
+        // Transport
+        config.append(" \"TRANSPORT_AUTHENTICATED\": 
\"audit_transport_request_type\",\n"); + config.append(" \"TRANSPORT_FAILED_LOGIN\": \"audit_request_exception_stacktrace\",\n"); + config.append(" \"TRANSPORT_PRIVILEGE\": \"audit_request_privilege\",\n"); + + config.append(" \"COMPLIANCE_OPERATION\": \"audit_compliance_operation\",\n"); + config.append(" \"COMPLIANCE_DOC_INDEX\": \"audit_trace_resolved_indices[0]\",\n"); + + config.append(" \"parsing_format\": \"JSON\",\n"); + config.append(" \"parsing_type\": \"SNIFFER\",\n"); + config.append(" \"sniffer_parser\": \"OPEN_SEARCH\",\n"); + config.append(" \"TEXT\": \"TEXT\"\n"); + + config.append("}\n"); + return config.toString(); + } +} diff --git a/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/Constants.java b/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/Constants.java new file mode 100644 index 000000000..c8587555a --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/Constants.java @@ -0,0 +1,46 @@ +package com.ibm.guardium.aws.opensearch; + +public class Constants { + public static final String LOGSTASH_TAG_JSON_PARSE_ERROR = "_opensearch_guardium_json_parse_error"; + public static final String AUDIT_CATEGORY = "audit_category"; + public static final String AUDIT_REQUEST_LAYER = "audit_request_layer"; + //request type + public static final String REQUEST_TYPE_REST = "REST"; + public static final String REQUEST_TYPE_TRANSPORT = "TRANSPORT"; + public static final String CATEGORY_FAILED_LOGIN = "FAILED_LOGIN"; + public static final String CATEGORY_MISSING_PRIVILEGES = "MISSING_PRIVILEGES"; + + + //OpenSearch event categories + public static final String CATEGORY_BAD_HEADERS = "BAD_HEADERS"; + public static final String CATEGORY_SSL_EXCEPTION = "SSL_EXCEPTION"; + public static final String CATEGORY_GRANTED_PRIVILEGES = "GRANTED_PRIVILEGES"; + public static final String CATEGORY_OPENSEARCH_SECURITY_INDEX_ATTEMPT = "OPENSEARCH_SECURITY_INDEX_ATTEMPT"; + public static final String CATEGORY_AUTHENTICATED = "AUTHENTICATED"; + //rest + public static final String CATEGORY_REST_FAILED_LOGIN = "REST_FAILED_LOGIN"; + public static final String CATEGORY_REST_AUTHENTICATED = "REST_AUTHENTICATED"; + public static final String CATEGORY_REST_SSL_EXCEPTION = "REST_SSL_EXCEPTION"; + public static final String CATEGORY_REST_BAD_HEADERS = "REST_BAD_HEADERS"; + public static final String CATEGORY_REST_MISSING_PRIVILEGES = "REST_MISSING_PRIVILEGES"; + //transport + public static final String CATEGORY_TRANSPORT_FAILED_LOGIN = "TRANSPORT_FAILED_LOGIN"; + public static final String CATEGORY_TRANSPORT_AUTHENTICATED = "TRANSPORT_AUTHENTICATED"; + public static final String CATEGORY_TRANSPORT_MISSING_PRIVILEGES = "TRANSPORT_MISSING_PRIVILEGES"; + public static final String CATEGORY_TRANSPORT_GRANTED_PRIVILEGES = "TRANSPORT_GRANTED_PRIVILEGES"; + public static final String CATEGORY_TRANSPORT_SSL_EXCEPTION = "TRANSPORT_SSL_EXCEPTION"; + public static final String CATEGORY_TRANSPORT_BAD_HEADERS = "TRANSPORT_BAD_HEADERS"; + public static final String CATEGORY_TRANSPORT_SECURITY_INDEX_ATTEMPT = "TRANSPORT_OPENSEARCH_SECURITY_INDEX"; + //standard categories + public static final String CATEGORY_INDEX_EVENT = "INDEX_EVENT"; + public static final String CATEGORY_COMPLIANCE_DOC_READ = "COMPLIANCE_DOC_READ"; + public static final String CATEGORY_COMPLIANCE_DOC_WRITE = "COMPLIANCE_DOC_WRITE"; + public static final String CATEGORY_COMPLIANCE_INTERNAL_CONFIG_READ = 
"COMPLIANCE_INTERNAL_CONFIG_READ"; + public static final String CATEGORY_COMPLIANCE_INTERNAL_CONFIG_WRITE = "COMPLIANCE_INTERNAL_CONFIG_WRITE"; + public static final String LANGUAGE_STRING = "OPEN_SEARCH"; + public static final String DB_PROTOCOL = "OPEN_SEARCH"; + static final String MESSAGE = "message"; + static final String INVALID_MSG_OPENSEARCH = "OPENSEARCH_EVENT_IS_INVALID"; + + +} diff --git a/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/OpensearchGuardiumFilter.java b/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/OpensearchGuardiumFilter.java new file mode 100644 index 000000000..941b38579 --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/OpensearchGuardiumFilter.java @@ -0,0 +1,98 @@ +/* +Copyright IBM Corp. 2021, 2023 All rights reserved. +SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.guardium.aws.opensearch; + +import co.elastic.logstash.api.*; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonObject; +import com.ibm.guardium.universalconnector.commons.GuardConstants; +import com.ibm.guardium.universalconnector.commons.custom_parsing.ParserFactory; +import com.ibm.guardium.universalconnector.commons.structures.Record; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; + +import static com.ibm.guardium.aws.opensearch.Constants.*; + +@LogstashPlugin(name = "opensearch_guardium_filter") +public class OpensearchGuardiumFilter implements Filter { + private static Logger logger = LogManager.getLogger(OpensearchGuardiumFilter.class); + public static final PluginConfigSpec SOURCE_CONFIG = PluginConfigSpec.stringSetting("source", "message"); + private String id; + private Parser parser; + + public OpensearchGuardiumFilter(String id, Configuration config, Context context) { + this.id = id; + this.parser = new Parser(ParserFactory.ParserType.json); + } + + @Override + public Collection> configSchema() { + return Collections.singletonList(SOURCE_CONFIG); + } + + /** + * Returns the id + * + * @return id + */ + @Override + public String getId() { + return this.id; + } + + /** + * Filters the received events by skipping the invalid ones and normalizing them by parsing the provided payloads into Guardium Generic Records. 
+ * + * @param events A list of received events + * @param filterMatchListener The listener for this plugin + * @return A list of normalized events + */ + public Collection filter(Collection events, FilterMatchListener filterMatchListener) { + ArrayList skippedEvents = new ArrayList<>(); + for (Event event : events) { + if (logger.isDebugEnabled()) { + logger.debug("Received event: {}", event.getData()); + } + + Object messageField = event.getField(MESSAGE); + String messageString = messageField.toString(); + + if (!CommonUtils.isJSONValid(messageString)) { + event.tag(INVALID_MSG_OPENSEARCH); + skippedEvents.add(event); + continue; + } + try { + JsonObject inputJSON = new Gson().fromJson(messageString, JsonObject.class); + Record record = parser.parseRecord(String.valueOf(inputJSON)); + if (record == null) { + event.tag(INVALID_MSG_OPENSEARCH); + skippedEvents.add(event); + continue; + } + Gson gson = new GsonBuilder() + .disableHtmlEscaping() + .serializeNulls() + .create(); + + event.setField(GuardConstants.GUARDIUM_RECORD_FIELD_NAME, gson.toJson(record)); + filterMatchListener.filterMatched(event); + } catch (Exception ex) { + logger.error("Exception in parsing message: {}", event.getData(), + ex); + event.tag(LOGSTASH_TAG_JSON_PARSE_ERROR); + } + + } + events.removeAll(skippedEvents); + return events; + } +} \ No newline at end of file diff --git a/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/Parser.java b/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/Parser.java new file mode 100644 index 000000000..47bc5ec44 --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/src/main/java/com/ibm/guardium/aws/opensearch/Parser.java @@ -0,0 +1,384 @@ +/* +Copyright IBM Corp. 2021, 2023 All rights reserved. +SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.guardium.aws.opensearch; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.ibm.guardium.universalconnector.commons.custom_parsing.CustomParser; +import com.ibm.guardium.universalconnector.commons.custom_parsing.ParserFactory; +import com.ibm.guardium.universalconnector.commons.structures.ExceptionRecord; +import com.ibm.guardium.universalconnector.commons.structures.Record; +import com.ibm.guardium.universalconnector.commons.structures.Time; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; + +import java.util.*; + +/** + * Parser Class will perform operation on parsing events and messages from the + * opensearch audit logs into a Guardium record instance Guardium records include + * the accessor, the sessionLocator, data, and exceptions. 
+public class Parser extends CustomParser {
+    private static Logger logger = LogManager.getLogger(Parser.class);
+    private final ObjectMapper mapper = new ObjectMapper();
+
+    public Parser(ParserFactory.ParserType parserType) {
+        super(parserType);
+    }
+
+    @Override
+    public Record parseRecord(String payload) {
+        try {
+            String normalizedPayload = normalizeAuditCategory(payload);
+            return super.parseRecord(normalizedPayload);
+        } catch (Exception e) {
+            logger.error("Error extracting record: " + e.getMessage(), e);
+            return null;
+        }
+    }
+
+    String normalizeAuditCategory(String payload) {
+        String category = getValueFromPayload(payload, Constants.AUDIT_CATEGORY);
+        if (category == null || category.isEmpty()) {
+            logger.error("Error normalizing audit category: " + category);
+        }
+
+        try {
+            String layer = getValueFromPayload(payload, Constants.AUDIT_REQUEST_LAYER);
+
+            String normalizedCategory = category;
+
+            if (layer.equals(Constants.REQUEST_TYPE_REST)) {
+                if (category.equals(Constants.CATEGORY_FAILED_LOGIN)) {
+                    normalizedCategory = Constants.CATEGORY_REST_FAILED_LOGIN;
+                } else if (category.equals(Constants.CATEGORY_AUTHENTICATED)) {
+                    normalizedCategory = Constants.CATEGORY_REST_AUTHENTICATED;
+                } else if (category.equals(Constants.CATEGORY_SSL_EXCEPTION)) {
+                    normalizedCategory = Constants.CATEGORY_REST_SSL_EXCEPTION;
+                } else if (category.equals(Constants.CATEGORY_BAD_HEADERS)) {
+                    normalizedCategory = Constants.CATEGORY_REST_BAD_HEADERS;
+                } else if (category.equals(Constants.CATEGORY_MISSING_PRIVILEGES)) {
+                    normalizedCategory = Constants.CATEGORY_REST_MISSING_PRIVILEGES;
+                }
+            } else if (layer.equals(Constants.REQUEST_TYPE_TRANSPORT)) {
+                if (category.equals(Constants.CATEGORY_FAILED_LOGIN)) {
+                    normalizedCategory = Constants.CATEGORY_TRANSPORT_FAILED_LOGIN;
+                } else if (category.equals(Constants.CATEGORY_AUTHENTICATED)) {
+                    normalizedCategory = Constants.CATEGORY_TRANSPORT_AUTHENTICATED;
+                } else if (category.equals(Constants.CATEGORY_MISSING_PRIVILEGES)) {
+                    normalizedCategory = Constants.CATEGORY_TRANSPORT_MISSING_PRIVILEGES;
+                } else if (category.equals(Constants.CATEGORY_GRANTED_PRIVILEGES)) {
+                    normalizedCategory = Constants.CATEGORY_TRANSPORT_GRANTED_PRIVILEGES;
+                } else if (category.equals(Constants.CATEGORY_SSL_EXCEPTION)) {
+                    normalizedCategory = Constants.CATEGORY_TRANSPORT_SSL_EXCEPTION;
+                } else if (category.equals(Constants.CATEGORY_BAD_HEADERS)) {
+                    normalizedCategory = Constants.CATEGORY_TRANSPORT_BAD_HEADERS;
+                }
+            }
+
+            // Standard categories (INDEX_EVENT and the COMPLIANCE_* categories) are
+            // layer-independent and pass through unchanged.
+
+            if (!normalizedCategory.equals(category)) {
+                JsonNode rootNode = mapper.readTree(payload);
+                ((ObjectNode) rootNode).put(Constants.AUDIT_CATEGORY, normalizedCategory);
+                return rootNode.toString();
+            }
+
+            return payload;
+        } catch (Exception e) {
+            logger.error("Error normalizing audit category: " + e.getMessage(), e);
+            return payload;
+        }
+    }
+
+    @Override
+    protected Record extractRecord(String payload) {
+        Record record = new Record();
+        record.setSessionId(this.getSessionId(payload));
+        record.setDbName(this.getDbName(payload));
+        record.setAppUserName(this.getAppUserName(payload));
+        String sqlString = this.getSqlString(payload);
+        record.setException(this.getException(payload, sqlString));
+        record.setAccessor(this.getAccessor(payload));
+        record.setSessionLocator(this.getSessionLocator(payload));
+        record.setTime(this.getTimestamp(payload));
+        record.setData(this.getData(payload, sqlString));
+        return record;
+    }
+
+    @Override
+    protected String parse(String payload, String key) {
+        if (key == null || key.isEmpty()) {
+            return null;
+        }
+
+        try {
+            return getValueFromPayload(payload, key);
+        } catch (Exception e) {
+            logger.error("Error parsing key '{}' from payload: {}", key, e.getMessage(), e);
+            return null;
+        }
+    }
+
+    String getValueFromPayload(String payload, String fieldName) {
+        if (fieldName == null || fieldName.isEmpty()) {
+            return null;
+        }
+        try {
+            JsonNode rootNode = mapper.readTree(payload);
+
+            if (fieldName.contains("[") && fieldName.contains("]")) {
+                int arrayStart = fieldName.indexOf("[");
+                int arrayEnd = fieldName.indexOf("]");
+                String arrayField = fieldName.substring(0, arrayStart);
+                int index = Integer.parseInt(fieldName.substring(arrayStart + 1, arrayEnd));
+
+                JsonNode arrayNode = rootNode.path(arrayField);
+                if (arrayNode.isArray() && arrayNode.size() > index) {
+                    return arrayNode.get(index).asText();
+                }
+                return "";
+            }
+
+            if (rootNode.has(fieldName)) {
+                JsonNode fieldNode = rootNode.get(fieldName);
+                if (fieldNode.isArray()) {
+                    return fieldNode.toString();
+                } else {
+                    return fieldNode.asText();
+                }
+            }
+        } catch (Exception e) {
+            logger.error("Error getting value from payload: " + e.getMessage(), e);
+        }
+        return "";
+    }
+
+    @Override
+    protected String getSqlString(String payload) {
+        StringBuilder sb = new StringBuilder();
+
+        String category = getValueFromPayload(payload, Constants.AUDIT_CATEGORY);
+        String layer = getValueFromPayload(payload, Constants.AUDIT_REQUEST_LAYER);
+
+        boolean complianceWrite = Constants.CATEGORY_COMPLIANCE_DOC_WRITE.equals(category) || Constants.CATEGORY_COMPLIANCE_INTERNAL_CONFIG_WRITE.equals(category);
+        boolean complianceRead = Constants.CATEGORY_COMPLIANCE_DOC_READ.equals(category) || Constants.CATEGORY_COMPLIANCE_INTERNAL_CONFIG_READ.equals(category);
+        String requestType = "";
+
+        sb.append("__OPSEARCH ");
+        if (layer != null && !layer.isEmpty()) {
+            if (Constants.REQUEST_TYPE_REST.equals(layer)) {
+                String method = getValueFromPayload(payload, "audit_rest_request_method");
+                String path = checkURIPath(getValueFromPayload(payload, "audit_rest_request_path"));
+
+                sb.append(method).append(" ").append(path).append(" ");
+            } else if (Constants.REQUEST_TYPE_TRANSPORT.equals(layer)) {
+                requestType = getValueFromPayload(payload, "audit_transport_request_type");
+                String requestPrivilege = checkURIPath(getValueFromPayload(payload, "audit_request_privilege"));
+
+                sb.append(requestType).append(" ").append(requestPrivilege).append(" ");
+            }
+        } else {
+            if (complianceRead) {
+                sb.append("GET").append(" ").append("/");
+            } else if (complianceWrite) {
+                sb.append("POST").append(" ").append("/");
+            }
+        }
+
+        sb.append("#");
+
+        sb.append("{");
+
+        sb.append("\"category\":\"").append(category).append("\"");
+        if (complianceWrite) {
+            String complianceOperation = getValueFromPayload(payload, "audit_compliance_operation");
+            sb.append(", \"action\":\"").append(complianceOperation).append("\"");
+        }
+
+        String body = getValueFromPayload(payload, "audit_request_body");
+        if (body != null && !body.isEmpty()) {
+            sb.append(", \"_query\":").append(body);
+        }
+
+        String resolvedIndex = getValueFromPayload(payload, "audit_trace_resolved_indices");
+        if (resolvedIndex.isEmpty()) {
+            resolvedIndex = getValueFromPayload(payload, "audit_trace_indices");
+        }
+        if (!resolvedIndex.isEmpty()) {
+            sb.append(", \"_indices\":\"").append(sanitizeResolvedIndices(resolvedIndex)).append("\"");
+        }
+
+        sb.append("}");
+        return sb.toString();
+    }
+
+    public List<String> sanitizeResolvedIndices(String jsonArrayString) {
+        List<String> sanitized = new ArrayList<>();
+
+        if (jsonArrayString == null || jsonArrayString.isEmpty()) {
+            return sanitized;
+        }
+
+        try {
+            JsonNode arrayNode = mapper.readTree(jsonArrayString);
+            if (arrayNode.isArray()) {
+                for (JsonNode node : arrayNode) {
+                    String index = node.asText();
+                    // skip internal indices such as ".kibana_1" and ".opendistro_security"
+                    if (index != null && !index.startsWith(".")) {
+                        sanitized.add(normalizeReservedKeyword(index));
+                    }
+                }
+            }
+        } catch (Exception e) {
+            logger.error("Failed to parse resolved indices: " + e.getMessage(), e);
+        }
+
+        return sanitized;
+    }
+
+    public static String checkURIPath(String uri) {
+        if (uri == null || uri.isEmpty()) {
+            return uri;
+        }
+
+        uri = uri.replaceAll("\\[.*?\\]", "");
+
+        try {
+            uri = URLDecoder.decode(uri, StandardCharsets.UTF_8.name());
+        } catch (UnsupportedEncodingException e) {
+            uri = uri.replaceAll("%", "_");
+        }
+
+        uri = uri.replace(":", "/");
+
+        // paths that contain markup cannot be valid OpenSearch URIs
+        if (uri.contains("<")) {
+            return "/invalid/xml_input";
+        }
+
+        // replace characters that are not safe in a URI segment (for example,
+        // control characters produced by percent-decoding) with an underscore
+        uri = uri.replaceAll("[^A-Za-z0-9/_.\\-]", "_");
+
+        // rebuild the path segment by segment, prefixing reserved keywords and
+        // skipping the generic "indices" prefix used by transport-layer
+        // privilege strings such as "indices:admin/mappings/get"
+        StringBuilder normalized = new StringBuilder();
+        for (String segment : uri.split("/")) {
+            if (segment.isEmpty() || segment.equals("indices")) {
+                continue;
+            }
+            normalized.append("/").append(normalizeReservedKeyword(segment));
+        }
+        return normalized.length() > 0 ? normalized.toString() : "/";
+    }
+
+    public static String normalizeReservedKeyword(String word) {
+        // these keywords collide with OpenSearch endpoints, so they are prefixed
+        // with an underscore during sanitization
+        Set<String> snifRestrictedKeywords = Set.of("template", "mappings", "get", "aliases", "user");
+        if (word != null && snifRestrictedKeywords.contains(word)) {
+            return "_" + word;
+        }
+        return word;
+    }
+
+    @Override
+    protected ExceptionRecord getException(String payload, String sqlString) {
+        ExceptionRecord exceptionRecord = new ExceptionRecord();
+        String exceptionTypeId = this.getExceptionTypeId(payload);
+        String category = "";
+
+        if (exceptionTypeId.isEmpty()) {
+            category = getValueFromPayload(payload, Constants.AUDIT_CATEGORY);
+            exceptionTypeId = getExceptionTypeFromCategory(category);
+            if (exceptionTypeId == null) {
+                return null;
+            }
+        }
+        exceptionRecord.setExceptionTypeId(exceptionTypeId);
+        exceptionRecord.setDescription(category);
+        exceptionRecord.setSqlString(sqlString);
+        return exceptionRecord;
+    }
+
+    private String getExceptionTypeFromCategory(String category) {
+        if (category.contains(Constants.CATEGORY_FAILED_LOGIN) || category.equals(Constants.CATEGORY_REST_FAILED_LOGIN) || category.equals(Constants.CATEGORY_TRANSPORT_FAILED_LOGIN)) {
+            return "LOGIN_FAILED";
+        }
+        if (category.equals(Constants.CATEGORY_MISSING_PRIVILEGES) || category.equals(Constants.CATEGORY_BAD_HEADERS) || category.equals(Constants.CATEGORY_SSL_EXCEPTION) || category.equals(Constants.CATEGORY_REST_MISSING_PRIVILEGES) || category.equals(Constants.CATEGORY_REST_BAD_HEADERS) || category.equals(Constants.CATEGORY_REST_SSL_EXCEPTION) || category.equals(Constants.CATEGORY_TRANSPORT_MISSING_PRIVILEGES) || category.equals(Constants.CATEGORY_TRANSPORT_BAD_HEADERS) || category.equals(Constants.CATEGORY_TRANSPORT_SSL_EXCEPTION)) {
+            return "SQL_ERROR";
+        }
+        return null;
+    }
+
+    @Override
+    protected String getDbUser(String payload) {
+        String value = this.getValue(payload, "db_user");
+        if (value == null || value.isEmpty()) {
+            value = this.getValue(payload, "db_user_initiating_user");
+        }
+        return (value == null || value.isEmpty()) ? "N.A."
: value; + } + + public static Time parseTimestamp(String timestamp) { + if (timestamp == null || timestamp.isEmpty()) { + throw new IllegalArgumentException("Timestamp cannot be null or empty"); + } + + ZonedDateTime date; + try { + date = ZonedDateTime.parse(timestamp); + } catch (Exception e) { + try { + DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"); + LocalDateTime localDateTime = LocalDateTime.parse(timestamp, formatter); + date = localDateTime.atZone(ZoneId.systemDefault()); + } catch (Exception e2) { + throw new IllegalArgumentException("Could not parse timestamp: " + timestamp, e2); + } + } + long millis = date.toInstant().toEpochMilli(); + int minOffset = date.getOffset().getTotalSeconds() / 60; + int minDst = date.getZone().getRules().isDaylightSavings(date.toInstant()) ? 60 : 0; + return new Time(millis, minOffset, minDst); + } + + @Override + public String getConfigFileContent() { + return new ConfigFileContent().getConfigFileContent(); + } +} \ No newline at end of file diff --git a/filter-plugin/logstash-filter-opensearch-guardium/src/test/java/com/ibm/guardium/aws/opensearch/OpensearchGuardiumFilterTest.java b/filter-plugin/logstash-filter-opensearch-guardium/src/test/java/com/ibm/guardium/aws/opensearch/OpensearchGuardiumFilterTest.java new file mode 100644 index 000000000..9bbed464e --- /dev/null +++ b/filter-plugin/logstash-filter-opensearch-guardium/src/test/java/com/ibm/guardium/aws/opensearch/OpensearchGuardiumFilterTest.java @@ -0,0 +1,393 @@ +package com.ibm.guardium.aws.opensearch; + +import co.elastic.logstash.api.*; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.ibm.guardium.universalconnector.commons.custom_parsing.ParserFactory; +import com.ibm.guardium.universalconnector.commons.structures.Time; +import org.junit.Before; +import org.junit.jupiter.api.Test; +import org.logstash.plugins.ConfigurationImpl; +import org.logstash.plugins.ContextImpl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.junit.jupiter.api.Assertions.*; + +class OpensearchGuardiumFilterTest { + + FilterMatchListener matchListener = new TestMatchListener(); + + String id = "1"; + Configuration config = new ConfigurationImpl(Collections.singletonMap("source", "")); + Context context = new ContextImpl(null, null); + OpensearchGuardiumFilter filter = new OpensearchGuardiumFilter(id, config, context); + + private Parser parser; + private ObjectMapper objectMapper; + + @Before + public void setUp() throws IOException { + parser = new Parser(ParserFactory.ParserType.json); + objectMapper = new ObjectMapper(); + } + + @Test + void testRestLayer() { + String payload = "{\n" + + " \"audit_cluster_name\": \"1245451225:myopensearchpk\",\n" + + " \"audit_transport_headers\": {\n" + + " \"X-Opaque-Id\": \"d6c4f099-4663-4633-baef-98eb0581f020\"\n" + + " },\n" + + " \"audit_node_name\": \"3c5bbacf33948a9a2c26426eb5a55c63\",\n" + + " \"audit_trace_task_id\": \"4dW-p_qGTr6MT1q9RqXThA:2669096\",\n" + + " \"audit_transport_request_type\": \"GetMappingsRequest\",\n" + + " \"audit_category\": \"INDEX_EVENT\",\n" + + " \"audit_request_origin\": \"REST\",\n" + + " \"audit_node_id\": \"4dW-p_qGTr6MT1q9RqXThA\",\n" + + " \"audit_request_layer\": \"TRANSPORT\",\n" + + " \"@timestamp\": \"2025-04-21T17:32:22.227+00:00\",\n" + + " 
\"audit_format_version\": 4,\n" + + " \"audit_request_remote_address\": \"216.58.113.178\",\n" + + " \"audit_request_privilege\": \"indices:admin/mappings/get\",\n" + + " \"audit_request_effective_user\": \"userpk\",\n" + + " \"audit_trace_resolved_indices\": [\n" + + " \".kibana_1\",\n" + + " \"school\",\n" + + " \".opendistro_security\",\n" + + " \"test_index\",\n" + + " \".opensearch-observability\"\n" + + " ]\n" + + "}"; + Event event = new org.logstash.Event(); + event.setField("message", payload); + Collection actualResponse = filter.filter(Collections.singletonList(event), matchListener); + + assertNotNull(actualResponse.toArray(new Event[0])[0].getField("GuardRecord")); + } + + @Test + void testArrayValuesFromIndex() { + String payload = "{\n" + + " \"audit_compliance_operation\": \"CREATE\",\n" + + " \"audit_cluster_name\": \"1245451225:myopensearchpk\",\n" + + " \"audit_node_name\": \"076ba9bbd1cfeb6c80e1d15a405869ed\",\n" + + " \"audit_category\": \"COMPLIANCE_DOC_WRITE\",\n" + + " \"audit_request_origin\": \"REST\",\n" + + " \"audit_compliance_doc_version\": 1,\n" + + " \"audit_request_body\": \"{\\n \\\"student_id\\\": \\\"101\\\",\\n \\\"name\\\": \\\"John Doe\\\",\\n \\\"age\\\": 15,\\n \\\"grade\\\": \\\"10th\\\",\\n \\\"subjects\\\": [\\\"Math\\\", \\\"Science\\\", \\\"English\\\"]\\n}\\n\",\n" + + " \"audit_node_id\": \"rgz9dKmNT9C9a1iSBM8yjg\",\n" + + " \"@timestamp\": \"2025-03-06T18:43:13.968+00:00\",\n" + + " \"audit_format_version\": 4,\n" + + " \"audit_request_remote_address\": \"69.171.141.155\",\n" + + " \"audit_trace_doc_id\": \"1\",\n" + + " \"audit_request_effective_user\": \"admin\",\n" + + " \"audit_trace_shard_id\": 0,\n" + + " \"audit_trace_indices\": [\n" + + " \"school-2025\"\n" + + " ],\n" + + " \"audit_trace_resolved_indices\": [\n" + + " \"school-2025\"\n" + + " ]\n" + + " }"; + Event event = new org.logstash.Event(); + event.setField("message", payload); + Collection actualResponse = filter.filter(Collections.singletonList(event), matchListener); + + assertNotNull(actualResponse.toArray(new Event[0])[0].getField("GuardRecord")); + } + + @Test + void testFailedLogin() { + String payload = "{\n" + + " \"audit_cluster_name\": \"1245451225:myopensearchpk\",\n" + + " \"audit_node_name\": \"3c5bbacf33948a9a2c26426eb5a55c63\",\n" + + " \"audit_rest_request_method\": \"GET\",\n" + + " \"audit_category\": \"FAILED_LOGIN\",\n" + + " \"audit_request_origin\": \"REST\",\n" + + " \"audit_node_id\": \"4dW-p_qGTr6MT1q9RqXThA\",\n" + + " \"audit_request_layer\": \"REST\",\n" + + " \"audit_rest_request_path\": \"/_plugins/_security/authinfo\",\n" + + " \"@timestamp\": \"2025-04-22T15:10:29.763+00:00\",\n" + + " \"audit_request_effective_user_is_admin\": false,\n" + + " \"audit_format_version\": 4,\n" + + " \"audit_request_remote_address\": \"216.58.113.178\",\n" + + " \"audit_rest_request_headers\": {\n" + + " \"x-opensearch-product-origin\": [\n" + + " \"opensearch-dashboards\"\n" + + " ],\n" + + " \"Connection\": [\n" + + " \"keep-alive\"\n" + + " ],\n" + + " \"x-opaque-id\": [\n" + + " \"56e3fd95-d79f-4077-a674-85fa22fed9e9\"\n" + + " ],\n" + + " \"Host\": [\n" + + " \"localhost:9200\"\n" + + " ],\n" + + " \"Content-Length\": [\n" + + " \"0\"\n" + + " ],\n" + + " \"NO_REDACT\": [\n" + + " \"false\"\n" + + " ]\n" + + " },\n" + + " \"audit_request_effective_user\": \"userpk\"\n" + + "}"; + Event event = new org.logstash.Event(); + event.setField("message", payload); + Collection actualResponse = filter.filter(Collections.singletonList(event), matchListener); + + 
assertNotNull(actualResponse.toArray(new Event[0])[0].getField("GuardRecord")); + } + + @Test + void testBadHeader() { + String payload = "{\n" + + " \"audit_cluster_name\": \"1245451225:myopensearchpk\",\n" + + " \"audit_rest_request_params\": {\n" + + " \"t\": \"1\",\n" + + " \"index\": \"teorema505\"\n" + + " },\n" + + " \"audit_node_name\": \"076ba9bbd1eb6c80e1d15a405869ed\",\n" + + " \"audit_rest_request_method\": \"GET\",\n" + + " \"audit_category\": \"BAD_HEADERS\",\n" + + " \"audit_request_origin\": \"REST\",\n" + + " \"audit_node_id\": \"rgz9dKmNT9C91iSBM8yjg\",\n" + + " \"audit_request_layer\": \"REST\",\n" + + " \"audit_rest_request_path\": \"/teorema505\",\n" + + " \"@timestamp\": \"2025-03-06T18:39:54.550+00:00\",\n" + + " \"audit_request_effective_user_is_admin\": false,\n" + + " \"audit_format_version\": 4,\n" + + " \"audit_request_remote_address\": \"161.35.66.151\",\n" + + " \"audit_rest_request_headers\": {\n" + + " \"content-length\": [\n" + + " \"0\"\n" + + " ],\n" + + " \"NO_REDACT\": [\n" + + " \"false\"\n" + + " ],\n" + + " \"user-agent\": [\n" + + " \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36\"\n" + + " ],\n" + + " \"accept\": [\n" + + " \"*/*\"\n" + + " ]\n" + + " },\n" + + " \"audit_request_effective_user\": \"\"\n" + + " }"; + Event event = new org.logstash.Event(); + event.setField("message", payload); + Collection actualResponse = filter.filter(Collections.singletonList(event), matchListener); + + assertNotNull(actualResponse.toArray(new Event[0])[0].getField("GuardRecord")); + } + + @Test + void testTransportLayer() { + String payload = "{\n" + + " \"audit_cluster_name\": \"1245451225:myopensearchpk\",\n" + + " \"audit_transport_headers\": {\n" + + " \"X-Opaque-Id\": \"f0075eb2-569f-4fd3-bfuu-2219571dfd9b\"\n" + + " },\n" + + " \"audit_node_name\": \"076ba9bbd1cfeb6c1d15a405869ed\",\n" + + " \"audit_trace_task_id\": \"rgz9dKmNT9C9a1iSBM8yjg:34312\",\n" + + " \"audit_transport_request_type\": \"GetAliasesRequest\",\n" + + " \"audit_category\": \"INDEX_EVENT\",\n" + + " \"audit_request_origin\": \"REST\",\n" + + " \"audit_node_id\": \"rgz9dKmNTC9a1iSBM8yjg\",\n" + + " \"audit_request_layer\": \"TRANSPORT\",\n" + + " \"@timestamp\": \"2025-03-06T16:56:39.103+00:00\",\n" + + " \"audit_format_version\": 4,\n" + + " \"audit_request_remote_address\": \"69.171.141.155\",\n" + + " \"audit_request_privilege\": \"indices:admin/aliases/get\",\n" + + " \"audit_request_effective_user\": \"admin\",\n" + + " \"audit_trace_resolved_indices\": [\n" + + " \".opendistro-reports-instances\",\n" + + " \".ql-datasources\",\n" + + " \".opendistro_security\",\n" + + " \".plugins-ml-config\",\n" + + " \"opensearch_dashboards_sample_data_flights\",\n" + + " \"school-2025\",\n" + + " \".opendistro-reports-definitions\",\n" + + " \".kibana_92668751_admin_1\",\n" + + " \".opensearch-observability\",\n" + + " \"opensearch_dashboards_sample_data_logs\",\n" + + " \".kibana_1\"\n" + + " ]\n" + + " }"; + Event event = new org.logstash.Event(); + event.setField("message", payload); + Collection actualResponse = filter.filter(Collections.singletonList(event), matchListener); + + assertNotNull(actualResponse.toArray(new Event[0])[0].getField("GuardRecord")); + } + + @Test + void testInvalidJsonHandling() { + // Arrange + String invalidJsonPayload = "{ \"@timestamp\": \"2025-03-06 15:39:02.724\", \"@message\": { \"audit_cluster_name\": \"346824953529:myopensearchpk\" "; + Event event = new org.logstash.Event(); + 
event.setField("message", invalidJsonPayload);
+
+        List<Event> events = new ArrayList<>();
+        events.add(event);
+
+        Collection<Event> actualResponse = filter.filter(events, matchListener);
+
+        assertEquals(0, actualResponse.size());
+    }
+
+    @Test
+    void testParse() {
+        String payload = "{ \"field1\": \"value1\", \"field2\": [\"value2\", \"value3\"] }";
+        Parser parser = new Parser(ParserFactory.ParserType.json);
+
+        String result = parser.parse(payload, "field1");
+        assertEquals("value1", result);
+
+        result = parser.parse(payload, "field2[1]");
+        assertEquals("value3", result);
+
+        result = parser.parse(payload, "nonexistent");
+        assertEquals("", result);
+
+        result = parser.parse(payload, "field2[5]");
+        assertEquals("", result);
+
+        result = parser.parse(payload, null);
+        assertNull(result);
+
+        result = parser.parse(payload, "");
+        assertNull(result);
+    }
+
+    @Test
+    void testGetValueFromPayload() {
+        String payload = "{ \"field1\": \"value1\", \"field2\": [\"value2\", \"value3\"] }";
+        Parser parser = new Parser(ParserFactory.ParserType.json);
+
+        String result = parser.getValueFromPayload(payload, "field1");
+        assertEquals("value1", result);
+
+        result = parser.getValueFromPayload(payload, "nonexistent");
+        assertEquals("", result);
+
+        result = parser.getValueFromPayload(payload, "field2[1]");
+        assertEquals("value3", result);
+
+        result = parser.getValueFromPayload(payload, "field2[5]");
+        assertEquals("", result);
+
+        result = parser.getValueFromPayload(payload, null);
+        assertNull(result);
+
+        result = parser.getValueFromPayload(payload, "");
+        assertNull(result);
+    }
+
+    @Test
+    void testNormalizeAuditCategory() {
+        Parser parser = new Parser(ParserFactory.ParserType.json);
+        ObjectMapper objectMapper = new ObjectMapper();
+
+        String restPayload = "{\"audit_category\": \"FAILED_LOGIN\", \"audit_request_layer\": \"REST\"}";
+        String normalizedRest = parser.normalizeAuditCategory(restPayload);
+        try {
+            JsonNode restNode = objectMapper.readTree(normalizedRest);
+            assertEquals("REST_FAILED_LOGIN", restNode.path("audit_category").asText());
+        } catch (Exception e) {
+            fail("Exception occurred while parsing JSON: " + e.getMessage());
+        }
+
+        String transportPayload = "{\"audit_category\": \"FAILED_LOGIN\", \"audit_request_layer\": \"TRANSPORT\"}";
+        String normalizedTransport = parser.normalizeAuditCategory(transportPayload);
+        try {
+            JsonNode transportNode = objectMapper.readTree(normalizedTransport);
+            assertEquals("TRANSPORT_FAILED_LOGIN", transportNode.path("audit_category").asText());
+        } catch (Exception e) {
+            fail("Exception occurred while parsing JSON: " + e.getMessage());
+        }
+
+        String standardPayload = "{\"audit_category\": \"INDEX_EVENT\", \"audit_request_layer\": \"REST\"}";
+        String normalizedStandard = parser.normalizeAuditCategory(standardPayload);
+        try {
+            JsonNode standardNode = objectMapper.readTree(normalizedStandard);
+            assertEquals("INDEX_EVENT", standardNode.path("audit_category").asText());
+        } catch (Exception e) {
+            fail("Exception occurred while parsing JSON: " + e.getMessage());
+        }
+
+        String malformedPayload = "{\"audit_category\": \"FAILED_LOGIN\"}";
+        String normalizedMalformed = parser.normalizeAuditCategory(malformedPayload);
+        assertEquals(malformedPayload, normalizedMalformed);
+    }
+
+    @Test
+    void testConfigSchema() {
+        OpensearchGuardiumFilter filter = new OpensearchGuardiumFilter("testId", null, null);
+        Collection<PluginConfigSpec<?>> configSchema = filter.configSchema();
+        assertNotNull(configSchema);
+        assertEquals(1, configSchema.size());
assertTrue(configSchema.contains(OpensearchGuardiumFilter.SOURCE_CONFIG));
+    }
+
+    @Test
+    void testGetId() {
+        String expectedId = "testId";
+        OpensearchGuardiumFilter filter = new OpensearchGuardiumFilter(expectedId, null, null);
+        String actualId = filter.getId();
+        assertEquals(expectedId, actualId);
+    }
+
+    @Test
+    void testParseTimestamp() {
+        String isoTimestamp = "2023-10-01T12:34:56.789Z";
+        Time time = Parser.parseTimestamp(isoTimestamp);
+        assertNotNull(time);
+
+        String invalidTimestamp = "invalid-timestamp";
+        assertThrows(IllegalArgumentException.class, () -> Parser.parseTimestamp(invalidTimestamp));
+        assertThrows(IllegalArgumentException.class, () -> Parser.parseTimestamp(null));
+    }
+
+    class TestMatchListener implements FilterMatchListener {
+        private AtomicInteger matchCount = new AtomicInteger(0);
+
+        public int getMatchCount() {
+            return matchCount.get();
+        }
+
+        @Override
+        public void filterMatched(co.elastic.logstash.api.Event arg0) {
+            matchCount.incrementAndGet();
+        }
+    }
+
+    @Test
+    public void testNormalizeReservedKeyword() {
+        assertEquals("_user", Parser.normalizeReservedKeyword("user"));
+        assertEquals("_get", Parser.normalizeReservedKeyword("get"));
+        assertEquals("school", Parser.normalizeReservedKeyword("school"));
+        assertNull(Parser.normalizeReservedKeyword(null));
+    }
+
+    @Test
+    public void testCheckURIPath_basic() {
+        assertEquals("/students", Parser.checkURIPath("students"));
+        assertEquals("/_user", Parser.checkURIPath("user"));
+        assertEquals("/_get/_template", Parser.checkURIPath("get/template"));
+    }
+
+    @Test
+    public void testCheckURIPath_encodedAndInvalid() {
+        assertEquals("/data/write/bulk_", Parser.checkURIPath("data:write:bulk%0A"));
+        // a path containing markup is rejected; the literal here is illustrative
+        assertEquals("/invalid/xml_input", Parser.checkURIPath("<xml>input</xml>"));
+        assertEquals("/_mappings", Parser.checkURIPath("indices:mappings"));
+    }
+}
diff --git a/filter-plugin/logstash-filter-oua-guardium/OuaOverConnectJdbcReadme.md b/filter-plugin/logstash-filter-oua-guardium/OuaOverConnectJdbcReadme.md index d7c7cc93b..c600a5a8b 100644 --- a/filter-plugin/logstash-filter-oua-guardium/OuaOverConnectJdbcReadme.md +++ b/filter-plugin/logstash-filter-oua-guardium/OuaOverConnectJdbcReadme.md
@@ -110,14 +110,14 @@ Detailed breakdown:
 * Configure the policies you require. See [policies](/docs/#policies) for more information.

 ### Configuring Universal Connector Profile
-1. See [Creating data source profile topic](https://www.ibm.com/docs/en/gdp/12.x?topic=configuration-creating-data-source-profiles) to create a datasource profile.
+1. To create a datasource profile, see [Creating data source profiles](https://www.ibm.com/docs/en/SSMPHH_12.x/com.ibm.guardium.doc.stap/guc/guc_datasource_profile_management.html).
 2. Select '**OUA over JDBC connect**' in the plug-ins list.
 3. Update the parameters as follows:

 | Field | Description |
 |--------------------------|--------------------------------------------------------------|
-| **Credential** | Create JDBC credentials. For more information, see [Creating Credentials](https://www.ibm.com/docs/en/gdp/12.x?topic=configuration-creating-credentials). |
-| **Kafka cluster** | Select the appropriate Kafka cluster from the available Kafka cluster list or create a new Kafka cluster. 
For more information, see [Managing Kafka clusters](https://www.ibm.com/docs/en/gdp/12.x?topic=flow-creating-kafka-clusters). | +| **Credential** | Create JDBC credentials. For more information, see [Creating Credentials](https://www.ibm.com/docs/en/SSMPHH_12.x/com.ibm.guardium.doc.stap/guc/guc_credential_management.html). | +| **Kafka cluster** | Select the appropriate Kafka cluster from the available Kafka cluster list or create a new Kafka cluster. For more information, see [Managing Kafka clusters](https://www.ibm.com/docs/en/SSMPHH_12.x/com.ibm.guardium.doc.stap/guc/guc_kafka_cluster_management.html). | | **No traffic threshold (minutes)** | Default value is 60. If there is no incoming traffic for an hour, S-TAP displays a red status. Once incoming traffic resumes, the status returns to green. | | **Initial Time (ms)** | The timestamp from which the connector starts polling for changes in the database. Setting this to 0 means the connector starts from the earliest available data. For incremental data fetching, this ensures only new data (after the initial time) is retrieved. | | **Hostname** | Specifies the hostname or IP address of the Oracle database server. It is the address where the Oracle instance can be accessed for establishing a JDBC connection. | @@ -127,4 +127,4 @@ Detailed breakdown: | **CDB Service Name / SID** | For OUA over JDBC Connect 2.0 and OUA multitenant over JDBC Connect, data is retrieved from the CDB service audit log: cdb_unified_audit_trail. | -4. Continue from step 3 of [Creating data source profile topic](https://www.ibm.com/docs/en/gdp/12.x?topic=configuration-creating-data-source-profiles) to complete creating a datasource profile. +4. Continue from step 3 of [Creating data source profiles](https://www.ibm.com/docs/en/SSMPHH_12.x/com.ibm.guardium.doc.stap/guc/guc_datasource_profile_management.html) to complete creating a datasource profile. 
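The profile fields above are the standard ingredients of an Oracle JDBC connection. As a quick way to sanity-check those values before saving the profile, the following minimal Java sketch opens a connection and counts the rows the user can see in the unified audit trail. It assumes the standard Oracle thin-driver URL format and an ojdbc driver on the classpath; the hostname, port, service name, and credentials are hypothetical placeholders, not values from this repository.
```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class OracleJdbcProfileCheck {
    public static void main(String[] args) throws Exception {
        // Hypothetical stand-ins for the profile fields above.
        String hostname = "oracle.example.com"; // "Hostname"
        int port = 1521;                        // default Oracle listener port
        String serviceName = "ORCLCDB";         // "CDB Service Name / SID"

        // Standard Oracle thin-driver URL: jdbc:oracle:thin:@//host:port/service
        String url = String.format("jdbc:oracle:thin:@//%s:%d/%s", hostname, port, serviceName);

        try (Connection conn = DriverManager.getConnection(url, "guardium_user", "changeit");
             Statement stmt = conn.createStatement();
             // OUA over JDBC Connect 2.0 reads from the CDB unified audit trail.
             ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM cdb_unified_audit_trail")) {
            if (rs.next()) {
                System.out.println("Audit rows visible to this user: " + rs.getLong(1));
            }
        }
    }
}
```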
diff --git a/filter-plugin/logstash-filter-oua-guardium/OuaOverPipeReadme.md b/filter-plugin/logstash-filter-oua-guardium/OuaOverPipeReadme.md index cf3ee9023..ef1b5e0ff 100644 --- a/filter-plugin/logstash-filter-oua-guardium/OuaOverPipeReadme.md +++ b/filter-plugin/logstash-filter-oua-guardium/OuaOverPipeReadme.md @@ -52,12 +52,12 @@ Update the variables in Makefile for your environment's Java home and Logstash l - For other environments including RDS in AWS and Oracle Databases On-Premises run the following commands: ``` - CREATE USER guardium IDENTIFIED BY password; - GRANT CONNECT, RESOURCE to guardium; - GRANT SELECT ANY DICTIONARY TO guardium; - exec DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(host => 'localhost', - ace => xs$ace_type(privilege_list => xs$name_list('connect', - 'resolve'), principal_name => 'guardium', principal_type => xs_acl.ptype_db)); + CREATE USER <username> IDENTIFIED BY <password>; + GRANT CONNECT to <username>; + GRANT AUDIT_VIEWER to <username>; + GRANT SELECT ON v_$INSTANCE to <username>; + GRANT SELECT ON v_$DATABASE to <username>; + GRANT SELECT ON v_$MYSTAT to <username>; ``` - To verify your new user's privileges, connect to the Oracle instance that you are planning to monitor using the name and credentials for your designated user and run the following statements: diff --git a/filter-plugin/logstash-filter-teradatadb-guardium/README.md b/filter-plugin/logstash-filter-teradatadb-guardium/README.md index b9ee964d3..35e3c9b36 100644 --- a/filter-plugin/logstash-filter-teradatadb-guardium/README.md +++ b/filter-plugin/logstash-filter-teradatadb-guardium/README.md @@ -1,10 +1,11 @@ # Teradata-Guardium Logstash filter plug-in ### Meet Teradata -* Tested versions: 16.20, 17.5 -* Environment: Cloud, On-premise + +* Tested versions: 16.20, 17.20, Azure VCE 17.20/20.0 +* Environment: Cloud, On-premise, Azure VCE 17.20/20.0 * Supported inputs: JDBC (pull) * Supported Guardium versions: - * Guardium Data Protection: 11.4 and above + * Guardium Data Protection: 11.4 and later This is a [Logstash](https://github.com/elastic/logstash) filter plug-in for the universal connector that is featured in IBM Security Guardium. It parses events and messages from the Teradata audit log into a [Guardium record](https://github.com/IBM/universal-connectors/raw/main/common/src/main/java/com/ibm/guardium/universalconnector/commons/structures/Record.java) instance (which is a standard structure composed of several parts). The information is then sent over to Guardium. Guardium records include the accessor (the person who tried to access the data), session, data, and exceptions. If there are no errors, the data contains details about the query "construct". The construct details the main action (verb) and collections (objects) involved. @@ -16,82 +17,89 @@ The plug-in is free and open-source (Apache 2.0). It can be used as a starting p There are multiple ways to install a Teradata server. For this example, we will assume that we already have a working Teradata server setup. ## 2. Enabling Auditing - 1. Connect to the Teradata server using SSH. - + 2. Login with the dbc user (or any other user) of Teradata that has access to DBQLAccessMacro using bteq. The commands to log in with the dbc user are: ```bteq .logon /dbc,``` -In the above command, give the password for the dbc user. - +In the above command, give the password for the dbc user. + 4. Create a user to read logs from audit tables through the logstash JDBC input plug-in. - CREATE USER <username> AS PERMANENT = 100000000 BYTES PASSWORD = "<password>" - + CREATE USER <username> AS PERMANENT = 100000000 BYTES PASSWORD = "<password>" + 5. 
To grant read access to objects inside the dbc user to the above-created user, execute the below command: - + + GRANT SELECT ON "dbc" TO "<username>"; + -6. There are multiple ways to enable DBQL Query Logging. Query logging includes a variety of table/view combinations in the DBC database. Logging for users, accounts, and applications should be used when it is really required. Query logging can be enabled for all users or for specific users. - + +6. There are multiple ways to enable DBQL Query Logging. Query logging includes a variety of table/view combinations in the DBC database. Logging for users, accounts, and applications should be used when it is really required. Query logging can be enabled for all users or for specific users. + To enable Auditing, use a command like this one: - + BEGIN QUERY LOGGING WITH SQL LIMIT SQLTEXT=0 ON ALL; - + DBQL Query Logging can be explored further in this [document](https://docs.teradata.com/r/qOek~PvFMDdCF0yyBN6zkA/f7yJJ4siIiBUpoQVvvAwpQ). - -6. Type "exit;" to exit the bteq terminal. - +6. Type "exit;" to exit the bteq terminal. + 7. Set the database time zone with two dbscontrol fields: - + "18. System TimeZone String" must be set to the timezone that we want to configure for our database. "57. TimeDateWZControl" must be set to 2. - -8. Close the terminal. - +8. Close the terminal. + 9. We can verify that a logging rule is created in a table. Log in with the dbc user. You can choose to trigger the below query through any client utility. In this case we are using Teradata Studio Express. - + select * from DBC.DBQLRulesV; - +### Notes: +For **Teradata VCE on Azure**, run the following query to turn the audit log on. +``` +BEGIN QUERY LOGGING WITH SQL LIMIT SQLTEXT=0 ON ALL; +``` +To check if the audit log is on/off, run: +``` +select * from DBC.DBQLRulesV; +``` ## 3. Steps to disable Auditing Auditing can be disabled similarly to how we enabled auditing by logging in with the dbc user or any other user that has access to DBQLAccessMacro. - + To disable query logging, execute the below command: - + END QUERY LOGGING WITH SQL LIMIT SQLTEXT=0 ON ALL; - -## 4. Archiving and Deleting DBQL Logs + +## 4. Archiving and Deleting DBQL Logs There are many ways to archive and delete DBQL logs. One of the ways is depicted below: - + Use the following steps to delete old log data from system tables manually: - + It is recommended, though not necessary, to disable DBQL logging before you perform clean-up activities on the logs. Otherwise, the delete process locks the DBQL table and if DBQL needs to flush a cache to the same table to continue logging queries, the whole system could experience a slow-down. - + ***Note: You cannot delete data that is less than 30 days old.*** - + 1. To back up log data - a. Create a duplicate log table in another database using the Copy Table syntax for the CREATE TABLE statement. - CT DBC.tablename AS databasename.tablename - b. Back up the table to tape storage in accordance with your site backup policy. - c. Drop the duplicate table using a DROP TABLE statement. + a. Create a duplicate log table in another database using the Copy Table syntax for the CREATE TABLE statement. + CT DBC.tablename AS databasename.tablename + b. Back up the table to tape storage in accordance with your site backup policy. + c. Drop the duplicate table using a DROP TABLE statement. 2. Log on to Teradata Studio as DBADMIN or another administrative user with DELETE privileges on database DBC. 3. In the Query window, enter an SQL statement to purge old log entries. 
For example: - DELETE FROM DBC.object_name WHERE (Date - LogDate) > number_of_days ; + DELETE FROM DBC.object_name WHERE (Date - LogDate) > number_of_days ; Examples of using the above query: - DELETE FROM DBC.DBQLOGTBL WHERE (DATE '2021-12-16' - cast(starttime as DATE)) > 30 ; - DELETE FROM DBC.DBQLSqlTbl WHERE (DATE '2021-12-16' - cast(collecttimestamp as DATE)) > 30 ; +DELETE FROM DBC.DBQLOGTBL WHERE (DATE '2021-12-16' - cast(starttime as DATE)) > 30 ; +DELETE FROM DBC.DBQLSqlTbl WHERE (DATE '2021-12-16' - cast(collecttimestamp as DATE)) > 30 ; #### Limitations: - + • The Teradata sniffer parser does not properly parse the operations listed below. Hence, this plug-in does not support these operations: -1] User Management +1] User Management 2] DBQL Queries @@ -100,16 +108,16 @@ Examples for using above query:- 4] Cast operations 5] Stored Procedure and User Defined Functions - + • The Teradata auditing does not audit authentication failure (Login Failed) operations. -• Following important field couldn't mapped with TeradataDB audit logs. +• The following important fields could not be mapped from the TeradataDB audit logs: 1] Client HostName : Not available in the audit logs. 2] Database Name : Not available in the audit logs. - -• In case of EC2 guardium instance, Teradata traffic took more time (25-30 min) to populate data in full sql Report. + +• In the case of an EC2 Guardium instance, Teradata traffic takes more time (25-30 min) to populate data in the Full SQL report. • This plug-in supports queries that are approximately 32,000 characters long. When the count of characters in a query exceeds the given count, the remaining part of the query is stored in other rows. This is why the SQLTextInfo column of the table DBC.DBQLSqlTbl has more than one row per QueryID. @@ -133,13 +141,13 @@ The Guardium universal connector is the Guardium entry point for native audit lo • Download driver jar - Go to the URL https://downloads.teradata.com/download/connectivity/jdbc-driver and download the zip/tar for the required version. After extracting the downloaded zip/tar, there will be a jar file. -#### Procedure: +#### Procedure: 1. On the collector, go to Setup > Tools and Views > Configure Universal Connector. 2. First enable the Universal Guardium connector, if it is disabled already. 3. Click ```Upload File``` and upload the jar/jars that you downloaded from the Teradata website. 4. Click ```Upload File``` and select the offline [logstash-filter-teradatadb_guardium_plugin_filter.zip](./TeradataOverJdbcPackage/logstash-filter-teradatadb_guardium_plugin_filter.zip) plug-in. After it is uploaded, click ```OK```. This is not necessary for Guardium Data Protection v11.0p490 or later, v11.0p540 or later, v12.0 or later. -5. Click the Plus sign to open the ```Connector Configuration``` dialog box. +5. Click the Plus sign to open the ```Connector Configuration``` dialog box. 6. Type a name in the Connector name field. 7. Update the input section to add the details from the [teradataJDBC.conf](./TeradataOverJdbcPackage/teradataJDBC.conf) file's input part, omitting the keyword "input{" at the beginning and its corresponding "}" at the end. Provide the details for database server name, username, and password that are required for connecting with JDBC. 8. Update the filter section to add the details from the [teradataJDBC.conf](./TeradataOverJdbcPackage/teradataJDBC.conf) file's filter part, omitting the keyword "filter{" at the beginning and its corresponding "}" at the end. 
Provide the same database server name that you gave in the above step against the Server_Hostname attribute in the filter section. @@ -158,4 +166,4 @@ The Guardium universal connector is the Guardium entry point for native audit lo 2. Check the Default capture value, Log Records Affected, and Inspect Returned data checkboxes, if unchecked already. 3. Click on Apply. 4. Click on Restart Inspection Engines. -5. To add the column in the Full SQL Report, select the Attribute ```Records Affected``` listed under the Entity ```FULL SQL```. \ No newline at end of file +5. To add the column in the Full SQL Report, select the Attribute ```Records Affected``` listed under the Entity ```FULL SQL```. diff --git a/filter-plugin/logstash-filter-trino-guardium/README.md b/filter-plugin/logstash-filter-trino-guardium/README.md index 61951d716..8fcb3abbc 100644 --- a/filter-plugin/logstash-filter-trino-guardium/README.md +++ b/filter-plugin/logstash-filter-trino-guardium/README.md @@ -77,7 +77,7 @@ enforcements. * Configure the policies you require. See [policies](/docs/#policies) for more information. * You must have permission for the S-Tap Management role. The admin user includes this role by default. * Download - the [logstash-filter-trino_guardium_filter](../../filter-plugin/logstash-filter-trino-guardium/logstash-filter-trino_guardium_plugin_filter.zip) + the [logstash-filter-trino_guardium_filter](../../filter-plugin/logstash-filter-trino-guardium/logstash-filter-trino_guardium_filter.zip) plug-in. * Verify that the http input plugin is available on the GDP system. If the plugin is missing, download and install the [logstash-input-http](../../input-plugin/logstash-input-http/logstash-input-http_guardium_filter.zip) @@ -88,7 +88,7 @@ enforcements. 1. On the collector, go to ```Setup``` > ```Tools and Views``` > ```Configure Universal Connector```. 2. Enable the universal connector if it is disabled. 3. Click ```Upload File``` and select the - offline [logstash-filter-trino_guardium_filter](../../filter-plugin/logstash-filter-trino-guardium/logstash-filter-trino_guardium_plugin_filter.zip) + offline [logstash-filter-trino_guardium_filter](../../filter-plugin/logstash-filter-trino-guardium/logstash-filter-trino_guardium_filter.zip) plug-in. After it is uploaded, click ```OK```. 4. Click ```Upload File``` and select the key.json file. After it is uploaded, click ```OK```. 5. Click the Plus sign to open the Connector Configuration dialog box. 
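Once the connector is saved and enabled, Trino's event listener pushes audit events to the connector's HTTP input over the port configured in the input section. To confirm that the port is reachable before wiring up Trino itself, a minimal probe like the sketch below can POST a hand-written event. The host, port, plain-HTTP scheme, and JSON shape here are illustrative assumptions only; real deployments typically front the input with TLS, and real Trino events carry many more fields.
```
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class TrinoHttpInputProbe {
    public static void main(String[] args) throws Exception {
        // Hypothetical collector endpoint; match the port in the connector's input section.
        String endpoint = "http://collector.example.com:5044/";

        // Minimal stand-in payload for a Trino audit event.
        String json = "{\"user\": \"alice\", \"query\": \"SELECT 1\"}";

        HttpRequest request = HttpRequest.newBuilder(URI.create(endpoint))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(json))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());

        // The logstash http input acknowledges accepted events with HTTP 200.
        System.out.println("HTTP " + response.statusCode());
    }
}
```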
diff --git a/filter-plugin/logstash-filter-trino-guardium/TrinoOverSyslogPackage/TrinoSyslog.conf b/filter-plugin/logstash-filter-trino-guardium/TrinoOverHttpPackage/TrinoOverHttp.conf similarity index 100% rename from filter-plugin/logstash-filter-trino-guardium/TrinoOverSyslogPackage/TrinoSyslog.conf rename to filter-plugin/logstash-filter-trino-guardium/TrinoOverHttpPackage/TrinoOverHttp.conf diff --git a/filter-plugin/logstash-filter-trino-guardium/logstash-filter-trino_guardium_plugin_filter.zip b/filter-plugin/logstash-filter-trino-guardium/logstash-filter-trino_guardium_filter.zip similarity index 100% rename from filter-plugin/logstash-filter-trino-guardium/logstash-filter-trino_guardium_plugin_filter.zip rename to filter-plugin/logstash-filter-trino-guardium/logstash-filter-trino_guardium_filter.zip diff --git a/input-plugin/logstash-input-couchbase-capella/README.md b/input-plugin/logstash-input-couchbase-capella/README.md index 5f289f116..807f93dce 100644 --- a/input-plugin/logstash-input-couchbase-capella/README.md +++ b/input-plugin/logstash-input-couchbase-capella/README.md @@ -47,8 +47,8 @@ https://docs.couchbase.com/server/current/manage/manage-security/manage-auditing 3. Click ```Upload File``` and select the offline [logstash-input-couchbase_capella_input](logstash-input-couchbase_capella_input.zip) plug-in. After it is uploaded, click ```OK```. 4. Click the Plus sign to open the Connector Configuration dialog box. 5. Type a name in the Connector name field. -6. Update the input section to add the details from the [capellaCouchbase.conf](../../filter-plugin/logstash-filter-capella-guardium/capellaCouchbaseOverCapellaPackage/capella/capellaCouchbase.conf) file's input part, omitting the keyword "input{" at the beginning and its corresponding "}" at the end. -7. Update the filter section to add the details from the [capellaCouchbase.conf](../../filter-plugin/logstash-filter-capella-guardium/capellaCouchbaseOverCapellaPackage/capella/capellaCouchbase.conf) file's filter part, omitting the keyword "filter{" at the beginning and its corresponding "}" at the end. +6. Update the input section to add the details from the [capellaCouchbase.conf](../../filter-plugin/logstash-filter-capella-guardium/CapellaCouchbaseOverCapellaPackage/capellaCouchbase.conf) file's input part, omitting the keyword "input{" at the beginning and its corresponding "}" at the end. +7. Update the filter section to add the details from the [capellaCouchbase.conf](../../filter-plugin/logstash-filter-capella-guardium/CapellaCouchbaseOverCapellaPackage/capellaCouchbase.conf) file's filter part, omitting the keyword "filter{" at the beginning and its corresponding "}" at the end. 8. The 'type' fields should match in the input and filter configuration sections. This field should be unique for every individual connector added. 9. Click ```Save```. Guardium validates the new connector and displays it in the Configure Universal Connector page. 10. After the offline plug-in is installed and the configuration is uploaded and saved in the Guardium machine, restart the Universal Connector using the ```Disable/Enable``` button. @@ -97,5 +97,9 @@ For more information, reference here https://docs.couchbase.com/server/current/a * ## Limitations * No more than three historical export requests are permitted over a 24-hour period. +Notes: +* It may take approximately 15–20 minutes for data to appear in the Full SQL report. +* The S-TAP page may display multiple entries if the server is accessed using different ports. + 
+ * ## Suggestion * In the configuration file, query_interval and query_length have no restrictions, with both fields defaulting to 1 hour. However, we recommend using shorter intervals rather than longer ones, as a larger interval may result in unnecessary waiting time before the next cycle, leading to resource inefficiency. \ No newline at end of file diff --git a/input-plugin/logstash-input-couchbase-capella/logstash-input-couchbase_capella_input.zip b/input-plugin/logstash-input-couchbase-capella/logstash-input-couchbase_capella_input.zip index 1cd342369..02e59376f 100644 Binary files a/input-plugin/logstash-input-couchbase-capella/logstash-input-couchbase_capella_input.zip and b/input-plugin/logstash-input-couchbase-capella/logstash-input-couchbase_capella_input.zip differ diff --git a/input-plugin/logstash-input-couchbase-capella/src/test/java/org/logstashplugins/CouchbaseCapellaInputTest.java b/input-plugin/logstash-input-couchbase-capella/src/test/java/org/logstashplugins/CouchbaseCapellaInputTest.java index c847edc82..35e1cc3d9 100644 --- a/input-plugin/logstash-input-couchbase-capella/src/test/java/org/logstashplugins/CouchbaseCapellaInputTest.java +++ b/input-plugin/logstash-input-couchbase-capella/src/test/java/org/logstashplugins/CouchbaseCapellaInputTest.java @@ -18,6 +18,7 @@ import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; +import java.util.TimeZone; import static org.mockserver.integration.ClientAndServer.startClientAndServer; import static org.mockserver.stop.Stop.stopQuietly; @@ -60,7 +61,7 @@ public void testCouchbaseCapellaInputTest() { var baseUrl = String.format("http://%s:%d/%s", mockServerHost, mockServerPort, mockServerApiBasePath); Map<String, Object> configValues = new HashMap<>(); configValues.put(CouchbaseCapellaInput.INTERVAL_CONFIG.name(), 2L); - configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 1L); + configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 20 * 60L); configValues.put(CouchbaseCapellaInput.API_BASE_URL_CONFIG.name(), baseUrl); configValues.put(CouchbaseCapellaInput.ORG_ID_CONFIG.name(), "success-org"); configValues.put(CouchbaseCapellaInput.PROJECT_ID_CONFIG.name(), "success-project"); @@ -106,21 +107,28 @@ public void run() { @Test public void testCouchbaseCapellaInput_EpochToISO8601() { - var baseUrl = String.format("http://%s:%d/%s", mockServerHost, mockServerPort, mockServerApiBasePath); - Map<String, Object> configValues = new HashMap<>(); - configValues.put(CouchbaseCapellaInput.INTERVAL_CONFIG.name(), 2L); - configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 1L); - configValues.put(CouchbaseCapellaInput.API_BASE_URL_CONFIG.name(), baseUrl); - configValues.put(CouchbaseCapellaInput.ORG_ID_CONFIG.name(), "success-org"); - configValues.put(CouchbaseCapellaInput.PROJECT_ID_CONFIG.name(), "success-project"); - configValues.put(CouchbaseCapellaInput.CLUSTER_ID_CONFIG.name(), "success-cluster"); - configValues.put(CouchbaseCapellaInput.AUTH_TOKEN_CONFIG.name(), "Bearer good_token"); - - Configuration config = new ConfigurationImpl(configValues); - CouchbaseCapellaInput input = new CouchbaseCapellaInput("test-id", config, null); + TimeZone originalTimeZone = TimeZone.getDefault(); - var dateTimeStr = input.epochSecToISO8601DateTimeString(1747702429L); - Assert.assertEquals("2025-05-19T20:53:49Z", dateTimeStr); + try { + TimeZone.setDefault(TimeZone.getTimeZone("UTC")); + var baseUrl = String.format("http://%s:%d/%s", mockServerHost, mockServerPort, mockServerApiBasePath); + Map<String, Object> 
configValues = new HashMap<>(); + configValues.put(CouchbaseCapellaInput.INTERVAL_CONFIG.name(), 2L); + configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 20 * 60L); + configValues.put(CouchbaseCapellaInput.API_BASE_URL_CONFIG.name(), baseUrl); + configValues.put(CouchbaseCapellaInput.ORG_ID_CONFIG.name(), "success-org"); + configValues.put(CouchbaseCapellaInput.PROJECT_ID_CONFIG.name(), "success-project"); + configValues.put(CouchbaseCapellaInput.CLUSTER_ID_CONFIG.name(), "success-cluster"); + configValues.put(CouchbaseCapellaInput.AUTH_TOKEN_CONFIG.name(), "Bearer good_token"); + + Configuration config = new ConfigurationImpl(configValues); + CouchbaseCapellaInput input = new CouchbaseCapellaInput("test-id", config, null); + + var dateTimeStr = input.epochSecToISO8601DateTimeString(1747702429L); + Assert.assertEquals("2025-05-20T00:53:49Z", dateTimeStr); + } finally { + TimeZone.setDefault(originalTimeZone); // Restore after test + } } @Test(expected = IllegalArgumentException.class) @@ -128,7 +136,7 @@ public void testCouchbaseCapellaInput_EmptyBaseUrl() { Map<String, Object> configValues = new HashMap<>(); configValues.put(CouchbaseCapellaInput.INTERVAL_CONFIG.name(), 2L); - configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 1L); + configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 20 * 60L); configValues.put(CouchbaseCapellaInput.API_BASE_URL_CONFIG.name(), ""); configValues.put(CouchbaseCapellaInput.ORG_ID_CONFIG.name(), "success-org"); configValues.put(CouchbaseCapellaInput.PROJECT_ID_CONFIG.name(), "success-project"); @@ -147,7 +155,7 @@ public void testCouchbaseCapellaInput_EmptyOrgID() { var baseUrl = String.format("http://%s:%d/%s", mockServerHost, mockServerPort, mockServerApiBasePath); Map<String, Object> configValues = new HashMap<>(); configValues.put(CouchbaseCapellaInput.INTERVAL_CONFIG.name(), 2L); - configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 1L); + configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 20 * 60L); configValues.put(CouchbaseCapellaInput.API_BASE_URL_CONFIG.name(), baseUrl); configValues.put(CouchbaseCapellaInput.ORG_ID_CONFIG.name(), ""); configValues.put(CouchbaseCapellaInput.PROJECT_ID_CONFIG.name(), "success-project"); @@ -166,7 +174,7 @@ public void testCouchbaseCapellaInput_EmptyProjectID() { var baseUrl = String.format("http://%s:%d/%s", mockServerHost, mockServerPort, mockServerApiBasePath); Map<String, Object> configValues = new HashMap<>(); configValues.put(CouchbaseCapellaInput.INTERVAL_CONFIG.name(), 2L); - configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 1L); + configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 20 * 60L); configValues.put(CouchbaseCapellaInput.API_BASE_URL_CONFIG.name(), baseUrl); configValues.put(CouchbaseCapellaInput.ORG_ID_CONFIG.name(), "success-org"); configValues.put(CouchbaseCapellaInput.PROJECT_ID_CONFIG.name(), ""); @@ -185,7 +193,7 @@ public void testCouchbaseCapellaInput_EmptyClusterID() { var baseUrl = String.format("http://%s:%d/%s", mockServerHost, mockServerPort, mockServerApiBasePath); Map<String, Object> configValues = new HashMap<>(); configValues.put(CouchbaseCapellaInput.INTERVAL_CONFIG.name(), 2L); - configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 1L); + configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 20 * 60L); configValues.put(CouchbaseCapellaInput.API_BASE_URL_CONFIG.name(), baseUrl); configValues.put(CouchbaseCapellaInput.ORG_ID_CONFIG.name(), "success-org"); 
configValues.put(CouchbaseCapellaInput.PROJECT_ID_CONFIG.name(), "success-project"); @@ -204,7 +212,7 @@ public void testCouchbaseCapellaInput_EmptyAuthToken() { var baseUrl = String.format("http://%s:%d/%s", mockServerHost, mockServerPort, mockServerApiBasePath); Map<String, Object> configValues = new HashMap<>(); configValues.put(CouchbaseCapellaInput.INTERVAL_CONFIG.name(), 2L); - configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 1L); + configValues.put(CouchbaseCapellaInput.QUERY_LENGTH_CONFIG.name(), 20 * 60L); configValues.put(CouchbaseCapellaInput.API_BASE_URL_CONFIG.name(), baseUrl); configValues.put(CouchbaseCapellaInput.ORG_ID_CONFIG.name(), "success-org"); configValues.put(CouchbaseCapellaInput.PROJECT_ID_CONFIG.name(), "success-project"); diff --git a/input-plugin/logstash-input-couchbase-capella/src/test/resources/mocks/mock-audit-log.tar.gz b/input-plugin/logstash-input-couchbase-capella/src/test/resources/mocks/mock-audit-log.tar.gz index 010185ed7..438d2d38a 100644 Binary files a/input-plugin/logstash-input-couchbase-capella/src/test/resources/mocks/mock-audit-log.tar.gz and b/input-plugin/logstash-input-couchbase-capella/src/test/resources/mocks/mock-audit-log.tar.gz differ
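The updated test above pins the JVM default timezone to UTC before asserting the formatted value (the old expected string, "2025-05-19T20:53:49Z", was the same instant rendered in a UTC-4 zone), which suggests epochSecToISO8601DateTimeString is sensitive to the default zone. A timezone-independent way to format epoch seconds is sketched below using java.time; the class and method names here are illustrative, not the plug-in's actual implementation.
```
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;

public class EpochFormatting {
    // Formats an epoch-seconds value as an ISO-8601 UTC timestamp,
    // independent of the JVM's default timezone.
    static String epochSecToIso8601(long epochSec) {
        return DateTimeFormatter.ISO_INSTANT
                .withZone(ZoneOffset.UTC)
                .format(Instant.ofEpochSecond(epochSec));
    }

    public static void main(String[] args) {
        // Matches the expected value in the updated test:
        // prints "2025-05-20T00:53:49Z"
        System.out.println(epochSecToIso8601(1747702429L));
    }
}
```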