Skip to content
Snippets Groups Projects
Commit f6536e84 authored by Arne Øslebø
Browse files

merge in dev01b branch

parents 7e78c6e6 f37e1b1e
No related branches found
No related tags found
No related merge requests found
Showing
with 1789 additions and 0 deletions
---
# Tasks for configuring and running Cassandra inside its container.
# Tagged so that `start`/`stop` can be targeted independently.
- name: Configure Cassandra
  template:
    src: cassandra.yaml.j2
    dest: /usr/share/cassandra/conf/cassandra.yaml
  tags:
    - start

- name: Start Cassandra
  command: "/start.sh"
  tags:
    - start

# Block until the first Cassandra node accepts CQL connections (port 9042).
- name: Wait for Cassandra
  wait_for:
    host: "{{ groups['cassandra'][0] }}"
    port: 9042
    state: started
    delay: 5
  tags:
    - start

# Graceful shutdown via the pid file written by the start script.
- name: Stop Cassandra
  command: "pkill -SIGTERM -F /var/run/cassandra/cassandra.pid"
  tags:
    - stop
This diff is collapsed.
---
# Tasks for provisioning and running Cortex (with an embedded
# Elasticsearch 6 backend) inside its container.
- name: Copy cacert to ca-trust dir
  remote_user: root
  copy:
    src: "files/{{ ca_cn }}.crt"
    dest: /etc/pki/ca-trust/source/anchors/ca.crt
  tags:
    - start
    - startcortex

- name: Install cacert to root truststore
  remote_user: root
  command: "update-ca-trust"
  tags:
    - start
    - startcortex

- name: Copy certificates in cortex conf dir
  copy:
    src: "{{ item }}"
    dest: "/etc/cortex/{{ item }}"
    # Quoted: an unquoted leading-zero 0600 is parsed by YAML as the
    # octal integer 384, which Ansible then misinterprets as a decimal mode.
    mode: "0600"
  with_items:
    - "{{ inventory_hostname }}.p12"
    - "{{ inventory_hostname }}.crt"
    - "{{ inventory_hostname }}.key"
    - cacerts.jks
    - "{{ ca_cn }}.crt"
  tags:
    - start
    - startcortex

- name: Configure embedded Elasticsearch 6
  remote_user: root
  template:
    src: jvm.options.j2
    dest: /etc/elasticsearch/jvm.options
  tags:
    - start
    - startcortex

# daemonize detaches the process; stdout is captured for debugging.
- name: Start embedded Elasticsearch 6
  remote_user: root
  command: >
    daemonize
    -u elasticsearch
    -c /usr/share/elasticsearch
    -p /tmp/elasticsearch.pid
    -o /tmp/elasticsearch-stdout.log
    /usr/share/elasticsearch/bin/elasticsearch
  tags:
    - start
    - startcortex

- name: Configure Cortex
  template:
    src: application.conf.j2
    dest: /etc/cortex/application.conf
  tags:
    - start
    - startcortex

# -Dpidfile.path=/dev/null: Cortex's own pid file is disabled because
# daemonize already writes /tmp/cortex.pid (used by the stop task below).
- name: Start Cortex
  command: >
    daemonize
    -c /opt/cortex
    -p /tmp/cortex.pid
    -o /tmp/cortex-stdout.log
    /opt/cortex/bin/cortex
    -Dconfig.file=/etc/cortex/application.conf
    -Dlogger.file=/etc/cortex/logback.xml
    -J-Xms1g
    -J-Xmx1g
    -Dpidfile.path=/dev/null
  tags:
    - start
    - startcortex

- name: Wait for Cortex
  wait_for:
    host: "{{ groups['cortex'][0] }}"
    port: 9001
    state: started
    delay: 5
  tags:
    - start
    - startcortex

- name: Stop Cortex
  command: "pkill -SIGTERM -F /tmp/cortex.pid"
  tags:
    - stop
    - stopcortex
# Sample Cortex application.conf file
# (Jinja2 template — {{...}} placeholders are filled in by Ansible.)
## SECRET KEY
#
# The secret key is used to secure cryptographic functions.
#
# IMPORTANT: If you deploy your application to several instances, make
# sure to use the same key.
play.http.secret.key="{{cortex_secret_key}}"
## ElasticSearch
search {
# Name of the index
index = cortex
# ElasticSearch instance address.
# For cluster, join address:port with ',': "http://ip1:9200,ip2:9200,ip3:9200"
#uri = "https://{{groups['odfeescontainers'][0]}}:9200"
# NOTE(review): currently pointing at the embedded local ES, not the ODFE
# cluster referenced in the commented-out line above — confirm intended.
uri = "http://localhost:9200"
## Advanced configuration
# Scroll keepalive.
#keepalive = 1m
# Scroll page size.
#pagesize = 50
# Number of shards
#nbshards = 5
# Number of replicas
#nbreplicas = 1
# Arbitrary settings
#settings {
# # Maximum number of nested fields
# mapping.nested_fields.limit = 100
#}
## ## Authentication configuration
## search.username = "cortex"
## search.password = "{{cortex_odfe_pass}}"
##
## ## SSL configuration
## search.keyStore {
## path = "/etc/cortex/dsoclab-cortex.p12"
## type = "PKCS12" # or JKS
## password = "{{kspass}}"
## }
## search.trustStore {
## path = "/etc/cortex/cacerts.jks"
## type = "JKS" # or PKCS12
## password = "{{tspass}}"
## }
}
## Cache
#
# If an analyzer is executed against the same observable, the previous report can be returned without re-executing the
# analyzer. The cache is used only if the second job occurs within cache.job (the default is 10 minutes).
cache.job = 10 minutes
## Authentication
auth {
# "provider" parameter contains the authentication provider(s). It can be multi-valued, which is useful
# for migration.
# The available auth types are:
# - services.LocalAuthSrv : passwords are stored in the user entity within ElasticSearch). No
# configuration are required.
# - ad : use ActiveDirectory to authenticate users. The associated configuration shall be done in
# the "ad" section below.
# - ldap : use LDAP to authenticate users. The associated configuration shall be done in the
# "ldap" section below.
provider = [local]
ad {
# The Windows domain name in DNS format. This parameter is required if you do not use
# 'serverNames' below.
#domainFQDN = "mydomain.local"
# Optionally you can specify the host names of the domain controllers instead of using 'domainFQDN
# above. If this parameter is not set, TheHive uses 'domainFQDN'.
#serverNames = [ad1.mydomain.local, ad2.mydomain.local]
# The Windows domain name using short format. This parameter is required.
#domainName = "MYDOMAIN"
# If 'true', use SSL to connect to the domain controller.
#useSSL = true
}
ldap {
# The LDAP server name or address. The port can be specified using the 'host:port'
# syntax. This parameter is required if you don't use 'serverNames' below.
#serverName = "ldap.mydomain.local:389"
# If you have multiple LDAP servers, use the multi-valued setting 'serverNames' instead.
#serverNames = [ldap1.mydomain.local, ldap2.mydomain.local]
# Account to use to bind to the LDAP server. This parameter is required.
#bindDN = "cn=thehive,ou=services,dc=mydomain,dc=local"
# Password of the binding account. This parameter is required.
#bindPW = "***secret*password***"
# Base DN to search users. This parameter is required.
#baseDN = "ou=users,dc=mydomain,dc=local"
# Filter to search user in the directory server. Please note that {0} is replaced
# by the actual user name. This parameter is required.
#filter = "(cn={0})"
# If 'true', use SSL to connect to the LDAP directory server.
#useSSL = true
}
}
## ANALYZERS
#
analyzer {
# analyzer location
# url can be point to:
# - directory where analyzers are installed
# - json file containing the list of analyzer descriptions
urls = [
#"https://dl.bintray.com/thehive-project/cortexneurons/analyzers.json"
"/opt/Cortex-Analyzers/analyzers"
]
# Sane defaults. Do not change unless you know what you are doing.
fork-join-executor {
# Min number of threads available for analysis.
parallelism-min = 2
# Parallelism (threads) ... ceil(available processors * factor).
parallelism-factor = 2.0
# Max number of threads available for analysis.
parallelism-max = 4
}
}
# RESPONDERS
#
responder {
# responder location (same format as analyzer.urls)
urls = [
#"https://dl.bintray.com/thehive-project/cortexneurons/reponders.json"
"/opt/Cortex-Analyzers/responders"
]
# Sane defaults. Do not change unless you know what you are doing.
fork-join-executor {
# Min number of threads available for analysis.
parallelism-min = 2
# Parallelism (threads) ... ceil(available processors * factor).
parallelism-factor = 2.0
# Max number of threads available for analysis.
parallelism-max = 4
}
}
# It's the end my friend. Happy hunting!
## JVM configuration
# (Jinja2 template — heap size below is filled in from
# cortex_elasticsearch_mem by Ansible.)
################################################################
## IMPORTANT: JVM heap size
################################################################
##
## You should always set the min and max JVM heap
## size to the same value. For example, to set
## the heap to 4 GB, set:
##
## -Xms4g
## -Xmx4g
##
## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
## for more information
##
################################################################
# Xms represents the initial size of total heap space
# Xmx represents the maximum size of total heap space
-Xms{{cortex_elasticsearch_mem}}
-Xmx{{cortex_elasticsearch_mem}}
################################################################
## Expert settings
################################################################
##
## All settings below this section are considered
## expert settings. Don't tamper with them unless
## you understand what you are doing
##
################################################################
## GC configuration
# CMS is used on JDK 8-13; JDK 14+ falls through to the G1GC block below.
8-13:-XX:+UseConcMarkSweepGC
8-13:-XX:CMSInitiatingOccupancyFraction=75
8-13:-XX:+UseCMSInitiatingOccupancyOnly
## G1GC Configuration
# NOTE: G1 GC is only supported on JDK version 10 or later
# to use G1GC, uncomment the next two lines and update the version on the
# following three lines to your version of the JDK
# 10-13:-XX:-UseConcMarkSweepGC
# 10-13:-XX:-UseCMSInitiatingOccupancyOnly
14-:-XX:+UseG1GC
14-:-XX:G1ReservePercent=25
14-:-XX:InitiatingHeapOccupancyPercent=30
## DNS cache policy
# cache ttl in seconds for positive DNS lookups noting that this overrides the
# JDK security property networkaddress.cache.ttl; set to -1 to cache forever
-Des.networkaddress.cache.ttl=60
# cache ttl in seconds for negative DNS lookups noting that this overrides the
# JDK security property networkaddress.cache.negative.ttl; set to -1 to cache
# forever
-Des.networkaddress.cache.negative.ttl=10
## optimizations
# pre-touch memory pages used by the JVM during initialization
-XX:+AlwaysPreTouch
## basic
# explicitly set the stack size
-Xss1m
# set to headless, just in case
-Djava.awt.headless=true
# ensure UTF-8 encoding by default (e.g. filenames)
-Dfile.encoding=UTF-8
# use our provided JNA always versus the system one
-Djna.nosys=true
# turn off a JDK optimization that throws away stack traces for common
# exceptions because stack traces are important for debugging
-XX:-OmitStackTraceInFastThrow
# enable helpful NullPointerExceptions (https://openjdk.java.net/jeps/358), if
# they are supported
14-:-XX:+ShowCodeDetailsInExceptionMessages
# flags to configure Netty
-Dio.netty.noUnsafe=true
-Dio.netty.noKeySetOptimization=true
-Dio.netty.recycler.maxCapacityPerThread=0
# log4j 2
-Dlog4j.shutdownHookEnabled=false
-Dlog4j2.disable.jmx=true
-Djava.io.tmpdir=${ES_TMPDIR}
## heap dumps
# generate a heap dump when an allocation from the Java heap fails
# heap dumps are created in the working directory of the JVM
-XX:+HeapDumpOnOutOfMemoryError
# specify an alternative path for heap dumps; ensure the directory exists and
# has sufficient space
-XX:HeapDumpPath=/var/lib/elasticsearch
# specify an alternative path for JVM fatal error logs
-XX:ErrorFile=/var/log/elasticsearch/hs_err_pid%p.log
## JDK 8 GC logging
8:-XX:+PrintGCDetails
8:-XX:+PrintGCDateStamps
8:-XX:+PrintTenuringDistribution
8:-XX:+PrintGCApplicationStoppedTime
8:-Xloggc:/var/log/elasticsearch/gc.log
8:-XX:+UseGCLogFileRotation
8:-XX:NumberOfGCLogFiles=32
8:-XX:GCLogFileSize=64m
# JDK 9+ GC logging
9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/elasticsearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m
# due to internationalization enhancements in JDK 9 Elasticsearch need to set the provider to COMPAT otherwise
# time/date parsing will break in an incompatible way for some date patterns and locals
9-:-Djava.locale.providers=COMPAT
# temporary workaround for C2 bug with JDK 10 on hardware with AVX-512
10-13:-XX:UseAVX=2
---
# Lifecycle of the Cassandra docker containers.
- name: Create cassandra containers and connect to network
  docker_container:
    name: "{{ item }}"
    hostname: "{{ item }}"
    image: "{{ cassandra_img }}"
    networks:
      - name: "{{ soctools_netname }}"
    networks_cli_compatible: true
    volumes:
      # Mount the per-host data volume (one volume per inventory host is
      # created by the volume tasks). The previous "{{cassandra_name}}"
      # source would attach every container to the same single volume.
      - "{{ item }}:/var/lib/cassandra"
    entrypoint: "/bin/bash"
    interactive: true
  with_items: "{{ groups['cassandra'] }}"
  tags:
    - start

- name: Disconnect cassandra containers from network and remove
  docker_container:
    name: "{{ item }}"
    state: absent
  with_items: "{{ groups['cassandra'] }}"
  tags:
    - stop
---
# Lifecycle of the Cortex docker containers.
- name: Create cortex containers and connect to network
  docker_container:
    name: "{{ item }}"
    hostname: "{{ item }}"
    image: "{{ cortex_img }}"
    networks:
      - name: "{{ soctools_netname }}"
    networks_cli_compatible: true
    # Ports are published via the haproxy container instead of directly.
    # published_ports:
    #   - "9001:9001"
    volumes:
      # Per-host volume backing the embedded Elasticsearch data directory.
      - "{{ item }}:/var/lib/elasticsearch/"
    entrypoint: "/bin/bash"
    interactive: true
  with_items: "{{ groups['cortex'] }}"
  tags:
    - start
    - startcortex

- name: Disconnect cortex containers from network and remove
  docker_container:
    name: "{{ item }}"
    state: absent
  with_items: "{{ groups['cortex'] }}"
  tags:
    - stop
    - stopcortex
......@@ -17,6 +17,8 @@
- "7750:7750"
- "5000-5099:5000-5099"
- "6000-6099:6000-6099"
- "9000:9000"
- "9001:9001"
entrypoint: "/bin/bash"
interactive: "yes"
tags:
......
......@@ -10,6 +10,9 @@
- include: odfekibana.yml
- include: misp.yml
- include: keycloak.yml
- include: cassandra.yml
- include: thehive.yml
- include: cortex.yml
- include: haproxy.yml
- include: networkremove.yml
......@@ -8,4 +8,5 @@
- subnet: "{{ soctools_network }}"
tags:
- start
- startcortex
---
# Lifecycle of the TheHive docker containers.
- name: Create thehive containers and connect to network
  docker_container:
    name: "{{ item }}"
    hostname: "{{ item }}"
    image: "{{ thehive_img }}"
    networks:
      - name: "{{ soctools_netname }}"
    networks_cli_compatible: true
    # Ports are published via the haproxy container instead of directly.
    # published_ports:
    #   - "9000:9000"
    entrypoint: "/bin/bash"
    interactive: true
  with_items: "{{ groups['thehive'] }}"
  tags:
    - start

- name: Disconnect thehive containers from network and remove
  docker_container:
    name: "{{ item }}"
    state: absent
  with_items: "{{ groups['thehive'] }}"
  tags:
    - stop
......@@ -8,6 +8,22 @@
tags:
- start
- name: Create cassandra volumes
docker_volume:
name: "{{item}}"
with_items:
- "{{ groups['cassandra'] }}"
tags:
- start
- name: Create cortex volumes
docker_volume:
name: "{{item}}"
with_items:
- "{{ groups['cortex'] }}"
tags:
- start
- name: Create NiFi volumes
docker_volume:
name: "{{item}}"
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment