Updated the Elastic stack to store log files and added an (experimental) dataset index

This commit is contained in:
George Kalampokis 2020-03-09 16:47:32 +02:00
parent ce4056b42f
commit c807d28c29
43 changed files with 1358 additions and 442 deletions

View File

@@ -1,3 +1,3 @@
TAG=6.3.1
ELASTIC_VERSION=6.3.1
ELASTIC_PASSWORD=changeme
ELK_VERSION=7.6.0
# Leave blank to use the "basic" image flavours, which include X-Pack.
# see https://www.elastic.co/subscriptions

2
ELK.Docker/.gitattributes vendored Normal file
View File

@@ -0,0 +1,2 @@
# Declare files that will always have LF line endings on checkout.
*.sh text eol=lf

View File

@@ -1,201 +1,21 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
The MIT License (MIT)
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
Copyright (c) 2015 Anthony Lapenna
1. Definitions.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,25 +1,4 @@
# stack-docker
This example Docker Compose configuration demonstrates many components of the
Elastic Stack, all running on a single machine under Docker.
Initialize default users and retrieve their passwords
## Prerequisites
- Docker and Compose. Windows and Mac users get Compose installed automatically
with Docker. Linux users can:
```
pip install docker-compose
```
- At least 4 GiB of RAM for the containers. Windows and Mac users _must_
configure their Docker virtual machine to have more than the default 2 GiB of
RAM:
![Docker VM memory settings](screenshots/docker-vm-memory-settings.png)
## Starting the stack
Try `docker-compose up` to create a demonstration Elastic Stack with
Elasticsearch, Kibana, Logstash, Auditbeat, Metricbeat, Filebeat, Packetbeat,
and Heartbeat.
Point a browser at [`http://localhost:5601`](http://localhost:5601) to see the results.
Log in with `elastic` / `changeme`.
1) Connect to the elasticsearch container with `docker exec -it elasticsearch /bin/bash`
2) Run `./bin/elasticsearch-setup-passwords auto > ./data/passwords.txt` (type `y` and press Enter when prompted; the output is redirected to the file, so the console will look blank)
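For reference, a minimal sketch of those two steps as a single host-side command, assuming the container is named `elasticsearch` as in the Compose files:
```
# Generate random passwords for the built-in users and keep a copy in the
# container's data directory; confirm the interactive prompt with "y".
docker exec -it elasticsearch /bin/bash -c \
  "./bin/elasticsearch-setup-passwords auto > ./data/passwords.txt"
```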

View File

@@ -0,0 +1,87 @@
version: '2.4'
services:
elasticsearch:
user: 1002:1002 #develuser
restart: unless-stopped
mem_limit: 2048m
environment:
- cluster.name=open-dmp-cluster
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xmx1024m -Xms1024m"
- xpack.license.self_generated.type=basic
- xpack.monitoring.collection.enabled=true
- xpack.security.enabled=true
ulimits:
nproc: 65535
memlock:
soft: -1
hard: -1
volumes:
- ./shared/config-elk/elasticsearch/config/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- ./shared/config-elk/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- ./shared/data-elk/elasticsearch-01-data:/usr/share/elasticsearch/data
- ./shared/data-elk/elasticsearch-01-log:/usr/share/elasticsearch/logs
#ports:
# - 51056:9200
# - 51057:9300
ports:
- "9200:9200"
expose:
- "9300"
networks:
open-dmp-elk-network:
logstash:
# user: 1002:1002 #develuser
volumes:
- ./shared/config-elk/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
- ./shared/config-elk/logstash/config/pipelines.yml:/usr/share/logstash/config/pipelines.yml:ro
- ./shared/config-elk/logstash/config/log4j2.properties:/usr/share/logstash/config/log4j2.properties:ro
- ./shared/config-elk/logstash/pipeline:/usr/share/logstash/pipeline:ro
- ./shared/config-elk/logstash/logstash/templates:/usr/share/logstash/templates
- ./shared/data-elk/logstash-log:/usr/share/logstash/logs
- ./shared/data-elk/logstash-queue:/usr/share/logstash/queue
- ./shared/data-elk/logstash-dead_letter_queue:/usr/share/logstash/dead_letter_queue
expose:
- "31311"
- "31312"
restart: on-failure
mem_limit: 2048m
environment:
- LS_JAVA_OPTS=-Xmx1024m -Xms1024m
- xpack.license.self_generated.type=basic
- xpack.security.enabled=true
networks:
open-dmp-elk-network:
kibana:
# user: 1002:1002 #develuser
mem_limit: 512m
environment:
- xpack.license.self_generated.type=basic
- xpack.security.enabled=true
volumes:
- ./shared/config-elk/kibana/config:/usr/share/kibana/config:ro
#- ./shared/config-elk/kibana/certificates:/usr/share/kibana/certificates
restart: unless-stopped
ports:
- "51058:5601"
networks:
- open-dmp-elk-network
filebeat:
restart: unless-stopped
mem_limit: 256m
#command: [ "-e=false" ] # to overwrite the -e that disables logging to file!
volumes:
- ./shared/config-elk/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
- ~/openDMP/logs:/usr/share/filebeat/log_data/dmp/
- ./shared/data-elk/filebeat-log:/usr/share/filebeat/logs
- ./shared/data-elk/filebeat-data:/usr/share/filebeat/data #For windows if we mount the data directory we get "Writing of registry returned error: sync /usr/share/filebeat/data/registry/filebeat: invalid argument."
networks:
- open-dmp-elk-network
networks:
open-dmp-elk-network:
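A hedged usage sketch for this Compose file, using the service names defined above; the `elastic` password placeholder stands for whatever `elasticsearch-setup-passwords` generated:
```
# Build the customised images and start the stack in the background
docker-compose build
docker-compose up -d elasticsearch logstash kibana filebeat

# Security is enabled, so credentials are required for the health check
curl -u elastic:<password> "http://localhost:9200/_cluster/health?pretty"
```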

View File

@@ -1,171 +1,43 @@
---
version: '3'
version: '2.4'
services:
# The environment variable "TAG" is used throughout this file to
# specify the version of the images to run. The default is set in the
# '.env' file in this folder. It can be overridden with any normal
# technique for setting environment variables, for example:
#
# TAG=6.0.0-beta1 docker-compose up
#
# REF: https://docs.docker.com/compose/compose-file/#variable-substitution
#
# Also be sure to set the ELASTIC_VERSION variable. For released versions,
# ${TAG} and ${ELASTIC_VERSION} will be identical, but for pre-release
# versions, ${TAG} might contain an extra build identifier, like
# "6.0.0-beta1-3eab5b40", so a full invocation might look like:
#
# ELASTIC_VERSION=6.0.0-beta1 TAG=6.0.0-beta1-3eab5b40 docker-compose up
#
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:${TAG}
image: ${DOCKER_REGISTRY}elasticsearch
container_name: elasticsearch
#volumes:
# - esdata:/usr/share/elasticsearch/data
environment: ['http.host=0.0.0.0', 'transport.host=127.0.0.1', 'ELASTIC_PASSWORD=${ELASTIC_PASSWORD}']
ports: ['0.0.0.0:9200:9200']
networks: ['stack']
build:
context: elasticsearch/
args:
ELK_VERSION: $ELK_VERSION
healthcheck:
# test: curl --cacert /usr/share/elasticsearch/config/certificates/ca/ca.crt -s https://localhost:9200 >/dev/null; if [[ $$? == 52 ]]; then echo 0; else echo 1; fi
interval: 30s
timeout: 10s
retries: 5
logstash:
image: ${DOCKER_REGISTRY}logstash
container_name: logstash
build:
context: logstash/
args:
ELK_VERSION: $ELK_VERSION
depends_on:
- elasticsearch
kibana:
image: docker.elastic.co/kibana/kibana:${TAG}
container_name: kibana
ports: ['0.0.0.0:5601:5601']
networks: ['stack']
depends_on: ['elasticsearch']
logstash:
image: docker.elastic.co/logstash/logstash:${TAG}
container_name: logstash
# Provide a simple pipeline configuration for Logstash with a bind-mounted file.
volumes:
- ./config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
ports: ['0.0.0.0:31311:31311']
networks: ['stack']
depends_on: ['elasticsearch', 'setup_logstash']
image: ${DOCKER_REGISTRY}kibana
build:
context: kibana/
args:
ELK_VERSION: $ELK_VERSION
depends_on:
- elasticsearch
filebeat:
image: docker.elastic.co/beats/filebeat:${TAG}
container_name: filebeat
command: -e -E 'output.elasticsearch.password=${ELASTIC_PASSWORD}'
# If the host system has logs at "/var/log", mount them at "/mnt/log"
# inside the container, where Filebeat can find them.
# volumes: ['/var/log:/mnt/log:ro']
networks: ['stack']
depends_on: ['elasticsearch', 'setup_filebeat']
heartbeat:
image: docker.elastic.co/beats/heartbeat:${TAG}
container_name: heartbeat
command: -e -E 'output.elasticsearch.password=${ELASTIC_PASSWORD}'
networks: ['stack']
depends_on: ['elasticsearch', 'setup_heartbeat']
# Run a short-lived container to set up Logstash.
setup_logstash:
image: centos:7
container_name: setup_logstash
volumes: ['./scripts/setup-logstash.sh:/usr/local/bin/setup-logstash.sh:ro']
# The script may have CR/LF line endings if using Docker for Windows, so
# make sure that they don't confuse Bash.
command: ['/bin/bash', '-c', 'cat /usr/local/bin/setup-logstash.sh | tr -d "\r" | bash']
environment: ['ELASTIC_PASSWORD=${ELASTIC_PASSWORD}']
networks: ['stack']
depends_on: ['elasticsearch']
setup_kibana:
image: centos:7
container_name: setup_kibana
volumes: ['./scripts/setup-kibana.sh:/usr/local/bin/setup-kibana.sh:ro']
command: ['/bin/bash', '-c', 'cat /usr/local/bin/setup-kibana.sh | tr -d "\r" | bash']
environment: ['ELASTIC_PASSWORD=${ELASTIC_PASSWORD}']
networks: ['stack']
depends_on: ['elasticsearch']
setup_filebeat:
image: docker.elastic.co/beats/filebeat:${TAG}
container_name: setup_filebeat
volumes: ['./scripts/setup-beat.sh:/usr/local/bin/setup-beat.sh:ro']
command: ['/bin/bash', '-c', 'cat /usr/local/bin/setup-beat.sh | tr -d "\r" | bash -s filebeat']
environment: ['ELASTIC_PASSWORD=${ELASTIC_PASSWORD}']
networks: ['stack']
depends_on: ['kibana']
setup_heartbeat:
image: docker.elastic.co/beats/heartbeat:${TAG}
container_name: setup_heartbeat
volumes: ['./scripts/setup-beat.sh:/usr/local/bin/setup-beat.sh:ro']
command: ['/bin/bash', '-c', 'cat /usr/local/bin/setup-beat.sh | tr -d "\r" | bash -s heartbeat']
environment: ['ELASTIC_PASSWORD=${ELASTIC_PASSWORD}']
networks: ['stack']
depends_on: ['kibana']
##########################DOCSBOX######################################################################
web:
restart: always
build: ./docsbox-master/docsbox
expose:
- "8000"
links:
- redis:redis
volumes:
- docsbox:/home/docsbox
- media:/home/docsbox/media
command: gunicorn -b :8000 docsbox:app
networks: ['stack']
rqworker:
restart: always
build: ./docsbox-master/docsbox
links:
- redis:redis
volumes:
- web
command: rq worker -c docsbox.settings
networks: ['stack']
rqscheduler:
restart: always
build: ./docsbox-master/docsbox
links:
- redis:redis
volumes:
- web
command: rqscheduler -H redis -p 6379 -d 0
networks: ['stack']
nginx:
restart: always
build: ./docsbox-master/nginx/
ports:
- "81:80"
volumes:
- web
links:
- web:web
networks: ['stack']
redis:
restart: always
image: redis:latest
expose:
- "6379"
volumes:
- redisdata:/data
networks: ['stack']
##########################SETTINGS######################################################################
volumes:
#esdata:
#driver: local
redisdata:
driver: local
docsbox:
driver: local
media:
driver: local
networks: {stack: {}}
image: ${DOCKER_REGISTRY}filebeat
build:
context: filebeat/
args:
ELK_VERSION: $ELK_VERSION
depends_on:
- logstash

View File

@@ -0,0 +1,22 @@
ARG ELK_VERSION
# https://github.com/elastic/elasticsearch-docker
FROM docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
RUN /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-icu && \
/usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-phonetic
RUN groupmod -g 1002 elasticsearch
RUN usermod -u 1002 -g 1002 elasticsearch
RUN chown -R elasticsearch /usr/share/elasticsearch
RUN sed -i -e 's/--userspec=1000/--userspec=1002/g' \
-e 's/UID 1000/UID 1002/' \
-e 's/chown -R 1000/chown -R 1002/' /usr/local/bin/docker-entrypoint.sh
RUN chown elasticsearch /usr/local/bin/docker-entrypoint.sh
ENV JAVA_HOME /usr/share/elasticsearch/jdk
# RUN mkdir /usr/share/elasticsearch/custom-plugins
# COPY plugins/elasticsearch-analysis-greeklish-7.5.1.zip /usr/share/elasticsearch/custom-plugins/elasticsearch-analysis-greeklish-7.5.1.zip
# RUN /usr/share/elasticsearch/bin/elasticsearch-plugin install file:///usr/share/elasticsearch/custom-plugins/elasticsearch-analysis-greeklish-7.5.1.zip
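As a sketch, this image can also be built standalone; `ELK_VERSION` must match the value in `.env` (7.6.0 above), and the tag name here is only illustrative:
```
docker build --build-arg ELK_VERSION=7.6.0 \
  -t opendmp-elasticsearch:7.6.0 elasticsearch/
```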

View File

@@ -0,0 +1,15 @@
ARG ELK_VERSION
FROM docker.elastic.co/beats/filebeat:${ELK_VERSION}
# USER root
# RUN groupmod -g 1002 filebeat
# RUN usermod -u 1002 -g 1002 filebeat
# RUN chown -R filebeat /usr/share/filebeat
# RUN sed -i -e 's/--userspec=1000/--userspec=1002/g' \
# -e 's/UID 1000/UID 1002/' \
# -e 's/chown -R 1000/chown -R 1002/' /usr/local/bin/docker-entrypoint
# RUN chown filebeat /usr/local/bin/docker-entrypoint
# USER 1002:1002

View File

@@ -0,0 +1,15 @@
ARG ELK_VERSION
# https://github.com/elastic/kibana-docker
FROM docker.elastic.co/kibana/kibana:${ELK_VERSION}
# USER root
# RUN groupmod -g 1002 kibana
# RUN usermod -g 1002 root
# RUN usermod -u 1002 -g 1002 kibana
# RUN chown -R kibana /usr/share/kibana
# USER 1002:1002
# Add your kibana plugins setup here
# Example: RUN kibana-plugin install <name|url>

View File

@@ -0,0 +1,20 @@
ARG ELK_VERSION
# https://github.com/elastic/logstash-docker
FROM docker.elastic.co/logstash/logstash:${ELK_VERSION}
# USER root
# RUN groupmod -g 1002 logstash
# RUN usermod -u 1002 -g 1002 logstash
# RUN chown -R logstash /usr/share/logstash
# RUN sed -i -e 's/--userspec=1000/--userspec=1002/g' \
# -e 's/UID 1000/UID 1002/' \
# -e 's/chown -R 1000/chown -R 1002/' /usr/local/bin/docker-entrypoint
# RUN chown logstash /usr/local/bin/docker-entrypoint
# USER 1002:1002
# Add your logstash plugins setup here
# Example: RUN logstash-plugin install logstash-filter-json
RUN logstash-plugin update logstash-input-beats
RUN logstash-plugin update logstash-filter-grok

3
ELK.Docker/old/.env Normal file
View File

@@ -0,0 +1,3 @@
TAG=6.3.1
ELASTIC_VERSION=6.3.1
ELASTIC_PASSWORD=changeme

201
ELK.Docker/old/LICENSE Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
ELK.Docker/old/README.md Normal file
View File

@@ -0,0 +1,25 @@
# stack-docker
This example Docker Compose configuration demonstrates many components of the
Elastic Stack, all running on a single machine under Docker.
## Prerequisites
- Docker and Compose. Windows and Mac users get Compose installed automatically
with Docker. Linux users can:
```
pip install docker-compose
```
- At least 4 GiB of RAM for the containers. Windows and Mac users _must_
configure their Docker virtual machine to have more than the default 2 GiB of
RAM:
![Docker VM memory settings](screenshots/docker-vm-memory-settings.png)
## Starting the stack
Try `docker-compose up` to create a demonstration Elastic Stack with
Elasticsearch, Kibana, Logstash, Auditbeat, Metricbeat, Filebeat, Packetbeat,
and Heartbeat.
Point a browser at [`http://localhost:5601`](http://localhost:5601) to see the results.
Log in with `elastic` / `changeme`.

View File

@@ -0,0 +1,171 @@
---
version: '3'
services:
# The environment variable "TAG" is used throughout this file to
# specify the version of the images to run. The default is set in the
# '.env' file in this folder. It can be overridden with any normal
# technique for setting environment variables, for example:
#
# TAG=6.0.0-beta1 docker-compose up
#
# REF: https://docs.docker.com/compose/compose-file/#variable-substitution
#
# Also be sure to set the ELASTIC_VERSION variable. For released versions,
# ${TAG} and ${ELASTIC_VERSION} will be identical, but for pre-release
# versions, ${TAG} might contain an extra build identifier, like
# "6.0.0-beta1-3eab5b40", so a full invocation might look like:
#
# ELASTIC_VERSION=6.0.0-beta1 TAG=6.0.0-beta1-3eab5b40 docker-compose up
#
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:${TAG}
container_name: elasticsearch
#volumes:
# - esdata:/usr/share/elasticsearch/data
environment: ['http.host=0.0.0.0', 'transport.host=127.0.0.1', 'ELASTIC_PASSWORD=${ELASTIC_PASSWORD}']
ports: ['0.0.0.0:9200:9200']
networks: ['stack']
kibana:
image: docker.elastic.co/kibana/kibana:${TAG}
container_name: kibana
ports: ['0.0.0.0:5601:5601']
networks: ['stack']
depends_on: ['elasticsearch']
logstash:
image: docker.elastic.co/logstash/logstash:${TAG}
container_name: logstash
# Provide a simple pipeline configuration for Logstash with a bind-mounted file.
volumes:
- ./config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
ports: ['0.0.0.0:31311:31311']
networks: ['stack']
depends_on: ['elasticsearch', 'setup_logstash']
filebeat:
image: docker.elastic.co/beats/filebeat:${TAG}
container_name: filebeat
command: -e -E 'output.elasticsearch.password=${ELASTIC_PASSWORD}'
# If the host system has logs at "/var/log", mount them at "/mnt/log"
# inside the container, where Filebeat can find them.
# volumes: ['/var/log:/mnt/log:ro']
networks: ['stack']
depends_on: ['elasticsearch', 'setup_filebeat']
heartbeat:
image: docker.elastic.co/beats/heartbeat:${TAG}
container_name: heartbeat
command: -e -E 'output.elasticsearch.password=${ELASTIC_PASSWORD}'
networks: ['stack']
depends_on: ['elasticsearch', 'setup_heartbeat']
# Run a short-lived container to set up Logstash.
setup_logstash:
image: centos:7
container_name: setup_logstash
volumes: ['./scripts/setup-logstash.sh:/usr/local/bin/setup-logstash.sh:ro']
# The script may have CR/LF line endings if using Docker for Windows, so
# make sure that they don't confuse Bash.
command: ['/bin/bash', '-c', 'cat /usr/local/bin/setup-logstash.sh | tr -d "\r" | bash']
environment: ['ELASTIC_PASSWORD=${ELASTIC_PASSWORD}']
networks: ['stack']
depends_on: ['elasticsearch']
setup_kibana:
image: centos:7
container_name: setup_kibana
volumes: ['./scripts/setup-kibana.sh:/usr/local/bin/setup-kibana.sh:ro']
command: ['/bin/bash', '-c', 'cat /usr/local/bin/setup-kibana.sh | tr -d "\r" | bash']
environment: ['ELASTIC_PASSWORD=${ELASTIC_PASSWORD}']
networks: ['stack']
depends_on: ['elasticsearch']
setup_filebeat:
image: docker.elastic.co/beats/filebeat:${TAG}
container_name: setup_filebeat
volumes: ['./scripts/setup-beat.sh:/usr/local/bin/setup-beat.sh:ro']
command: ['/bin/bash', '-c', 'cat /usr/local/bin/setup-beat.sh | tr -d "\r" | bash -s filebeat']
environment: ['ELASTIC_PASSWORD=${ELASTIC_PASSWORD}']
networks: ['stack']
depends_on: ['kibana']
setup_heartbeat:
image: docker.elastic.co/beats/heartbeat:${TAG}
container_name: setup_heartbeat
volumes: ['./scripts/setup-beat.sh:/usr/local/bin/setup-beat.sh:ro']
command: ['/bin/bash', '-c', 'cat /usr/local/bin/setup-beat.sh | tr -d "\r" | bash -s heartbeat']
environment: ['ELASTIC_PASSWORD=${ELASTIC_PASSWORD}']
networks: ['stack']
depends_on: ['kibana']
##########################DOCSBOX######################################################################
# web:
# restart: always
# build: ./docsbox-master/docsbox
# expose:
# - "8000"
# links:
# - redis:redis
# volumes:
# - docsbox:/home/docsbox
# - media:/home/docsbox/media
# command: gunicorn -b :8000 docsbox:app
# networks: ['stack']
#
# rqworker:
# restart: always
# build: ./docsbox-master/docsbox
# links:
# - redis:redis
# volumes:
# - web
# command: rq worker -c docsbox.settings
# networks: ['stack']
#
# rqscheduler:
# restart: always
# build: ./docsbox-master/docsbox
# links:
# - redis:redis
# volumes:
# - web
# command: rqscheduler -H redis -p 6379 -d 0
# networks: ['stack']
#
# nginx:
# restart: always
# build: ./docsbox-master/nginx/
# ports:
# - "81:80"
# volumes:
# - web
# links:
# - web:web
# networks: ['stack']
#
# redis:
# restart: always
# image: redis:latest
# expose:
# - "6379"
# volumes:
# - redisdata:/data
# networks: ['stack']
##########################SETTINGS######################################################################
volumes:
#esdata:
#driver: local
redisdata:
driver: local
docsbox:
driver: local
media:
driver: local
networks: {stack: {}}

View File

Binary image file (46 KiB before, 46 KiB after; content unchanged)

View File

@@ -0,0 +1,20 @@
---
## Default Elasticsearch configuration from elasticsearch-docker.
## from https://github.com/elastic/elasticsearch-docker/blob/master/build/elasticsearch/elasticsearch.yml
#
network.host: 0.0.0.0
# minimum_master_nodes need to be explicitly set when bound on a public IP
# set to 1 to allow single node clusters
# Details: https://github.com/elastic/elasticsearch/pull/17288
# discovery.zen.minimum_master_nodes: 1
## Use single node discovery in order to disable production mode and avoid bootstrap checks
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
#
discovery.type: single-node
## Search Guard
#
cluster.routing.allocation.disk.watermark.flood_stage: 99%
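To confirm that single-node discovery took effect, the cluster should report exactly one node, which is its own master. A sketch; adjust host and credentials to your deployment:
```
# Expect a single row, marked "*" in the master column
curl -u elastic:<password> "http://localhost:9200/_cat/nodes?v"
```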

View File

@@ -0,0 +1,179 @@
#https://github.com/elastic/elasticsearch/blob/7.4/distribution/src/config/log4j2.properties
status = error
# log action execution errors for easier debugging
logger.action.name = org.elasticsearch.action
logger.action.level = debug
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
######## Server JSON ############################
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json
appender.rolling.layout.type = ESJsonLayout
appender.rolling.layout.type_name = server
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size = 128MB
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.fileIndex = nomax
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
################################################
######## Server - old style pattern ###########
appender.rolling_old.type = RollingFile
appender.rolling_old.name = rolling_old
appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.rolling_old.layout.type = PatternLayout
appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
appender.rolling_old.policies.type = Policies
appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling_old.policies.time.interval = 1
appender.rolling_old.policies.time.modulate = true
appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling_old.policies.size.size = 128MB
appender.rolling_old.strategy.type = DefaultRolloverStrategy
appender.rolling_old.strategy.fileIndex = nomax
appender.rolling_old.strategy.action.type = Delete
appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path}
appender.rolling_old.strategy.action.condition.type = IfFileName
appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB
################################################
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
rootLogger.appenderRef.rolling.ref = rolling
rootLogger.appenderRef.rolling_old.ref = rolling_old
######## Deprecation JSON #######################
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json
appender.deprecation_rolling.layout.type = ESJsonLayout
appender.deprecation_rolling.layout.type_name = deprecation
appender.deprecation_rolling.layout.esmessagefields=x-opaque-id
appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4
#################################################
######## Deprecation - old style pattern #######
appender.deprecation_rolling_old.type = RollingFile
appender.deprecation_rolling_old.name = deprecation_rolling_old
appender.deprecation_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
appender.deprecation_rolling_old.layout.type = PatternLayout
appender.deprecation_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
appender.deprecation_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
_deprecation-%i.log.gz
appender.deprecation_rolling_old.policies.type = Policies
appender.deprecation_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling_old.policies.size.size = 1GB
appender.deprecation_rolling_old.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling_old.strategy.max = 4
#################################################
logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = warn
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.appenderRef.deprecation_rolling_old.ref = deprecation_rolling_old
logger.deprecation.additivity = false
######## Search slowlog JSON ####################
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
.cluster_name}_index_search_slowlog.json
appender.index_search_slowlog_rolling.layout.type = ESJsonLayout
appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog
appender.index_search_slowlog_rolling.layout.esmessagefields=message,took,took_millis,total_hits,types,stats,search_type,total_shards,source,id
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
.cluster_name}_index_search_slowlog-%i.json.gz
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.size.size = 1GB
appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy
appender.index_search_slowlog_rolling.strategy.max = 4
#################################################
######## Search slowlog - old style pattern ####
appender.index_search_slowlog_rolling_old.type = RollingFile
appender.index_search_slowlog_rolling_old.name = index_search_slowlog_rolling_old
appender.index_search_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
_index_search_slowlog.log
appender.index_search_slowlog_rolling_old.layout.type = PatternLayout
appender.index_search_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
appender.index_search_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
_index_search_slowlog-%i.log.gz
appender.index_search_slowlog_rolling_old.policies.type = Policies
appender.index_search_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
appender.index_search_slowlog_rolling_old.policies.size.size = 1GB
appender.index_search_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy
appender.index_search_slowlog_rolling_old.strategy.max = 4
#################################################
logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling_old.ref = index_search_slowlog_rolling_old
logger.index_search_slowlog_rolling.additivity = false
######## Indexing slowlog JSON ##################
appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
_index_indexing_slowlog.json
appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout
appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog
appender.index_indexing_slowlog_rolling.layout.esmessagefields=message,took,took_millis,doc_type,id,routing,source
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
_index_indexing_slowlog-%i.json.gz
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.size.size = 1GB
appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy
appender.index_indexing_slowlog_rolling.strategy.max = 4
#################################################
######## Indexing slowlog - old style pattern ##
appender.index_indexing_slowlog_rolling_old.type = RollingFile
appender.index_indexing_slowlog_rolling_old.name = index_indexing_slowlog_rolling_old
appender.index_indexing_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
_index_indexing_slowlog.log
appender.index_indexing_slowlog_rolling_old.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
appender.index_indexing_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
_index_indexing_slowlog-%i.log.gz
appender.index_indexing_slowlog_rolling_old.policies.type = Policies
appender.index_indexing_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling_old.policies.size.size = 1GB
appender.index_indexing_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy
appender.index_indexing_slowlog_rolling_old.strategy.max = 4
#################################################
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old
logger.index_indexing_slowlog.additivity = false

View File

@@ -0,0 +1,16 @@
#filebeat.registry_file: /usr/share/filebeat/registry
filebeat.inputs:
- type: log
paths:
- /usr/share/filebeat/log_data/dmp/openDMP*.log
tags: ["audit"]
enabled: true
reload.enabled: true
reload.period: 10s
multiline.pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
multiline.negate: true
multiline.match: after
output.logstash:
hosts: ["logstash:31312"]
bulk_max_size: 128
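To illustrate the multiline settings above: with `negate: true` and `match: after`, any line that does not start with a `YYYY-MM-DD` timestamp is appended to the preceding event, so a stack trace ships together with its log line. A hypothetical input:
```
2020-03-09 16:47:32.123 ERROR 1 --- [main] eu.eudat.SomeController : request failed
java.lang.NullPointerException: boom
    at eu.eudat.SomeController.handle(SomeController.java:42)
```
Filebeat would forward all three lines as a single event tagged `audit`.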

View File

@@ -0,0 +1,17 @@
---
## Default Kibana configuration from kibana-docker.
## from https://github.com/elastic/kibana-docker/blob/master/build/kibana/config/kibana.yml
#
server.name: kibana
server.host: "0"
## Custom configuration
#
#server.basePath: "/eformslogs"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
#elasticsearch.ssl.certificateAuthorities: [ "/usr/share/kibana/certificate_authorities/ca.crt" ]
elasticsearch.username: "kibana"
elasticsearch.password: ""
server.ssl.enabled: false
#server.ssl.key: "/usr/share/kibana/certificates/kibana.key"
#server.ssl.certificate: "/usr/share/kibana/certificates/kibana.crt"

View File

@@ -0,0 +1,103 @@
#https://github.com/elastic/logstash/blob/7.4/config/log4j2.properties
status = error
name = LogstashPropertiesConfig
appender.console.type = Console
appender.console.name = plain_console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]} %m%n
appender.json_console.type = Console
appender.json_console.name = json_console
appender.json_console.layout.type = JSONLayout
appender.json_console.layout.compact = true
appender.json_console.layout.eventEol = true
appender.rolling.type = RollingFile
appender.rolling.name = plain_rolling
appender.rolling.fileName = ${sys:ls.logs}/logstash-${sys:ls.log.format}.log
appender.rolling.filePattern = ${sys:ls.logs}/logstash-${sys:ls.log.format}-%d{yyyy-MM-dd}-%i.log.gz
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]} %m%n
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size = 100MB
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.max = 30
appender.json_rolling.type = RollingFile
appender.json_rolling.name = json_rolling
appender.json_rolling.fileName = ${sys:ls.logs}/logstash-${sys:ls.log.format}.log
appender.json_rolling.filePattern = ${sys:ls.logs}/logstash-${sys:ls.log.format}-%d{yyyy-MM-dd}-%i.log.gz
appender.json_rolling.policies.type = Policies
appender.json_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.json_rolling.policies.time.interval = 1
appender.json_rolling.policies.time.modulate = true
appender.json_rolling.layout.type = JSONLayout
appender.json_rolling.layout.compact = true
appender.json_rolling.layout.eventEol = true
appender.json_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.json_rolling.policies.size.size = 100MB
appender.json_rolling.strategy.type = DefaultRolloverStrategy
appender.json_rolling.strategy.max = 30
rootLogger.level = ${sys:ls.log.level}
rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console
rootLogger.appenderRef.rolling.ref = ${sys:ls.log.format}_rolling
# Slowlog
appender.console_slowlog.type = Console
appender.console_slowlog.name = plain_console_slowlog
appender.console_slowlog.layout.type = PatternLayout
appender.console_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n
appender.json_console_slowlog.type = Console
appender.json_console_slowlog.name = json_console_slowlog
appender.json_console_slowlog.layout.type = JSONLayout
appender.json_console_slowlog.layout.compact = true
appender.json_console_slowlog.layout.eventEol = true
appender.rolling_slowlog.type = RollingFile
appender.rolling_slowlog.name = plain_rolling_slowlog
appender.rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}.log
appender.rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}-%d{yyyy-MM-dd}-%i.log.gz
appender.rolling_slowlog.policies.type = Policies
appender.rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling_slowlog.policies.time.interval = 1
appender.rolling_slowlog.policies.time.modulate = true
appender.rolling_slowlog.layout.type = PatternLayout
appender.rolling_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n
appender.rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling_slowlog.policies.size.size = 100MB
appender.rolling_slowlog.strategy.type = DefaultRolloverStrategy
appender.rolling_slowlog.strategy.max = 30
appender.json_rolling_slowlog.type = RollingFile
appender.json_rolling_slowlog.name = json_rolling_slowlog
appender.json_rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}.log
appender.json_rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-${sys:ls.log.format}-%d{yyyy-MM-dd}-%i.log.gz
appender.json_rolling_slowlog.policies.type = Policies
appender.json_rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy
appender.json_rolling_slowlog.policies.time.interval = 1
appender.json_rolling_slowlog.policies.time.modulate = true
appender.json_rolling_slowlog.layout.type = JSONLayout
appender.json_rolling_slowlog.layout.compact = true
appender.json_rolling_slowlog.layout.eventEol = true
appender.json_rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy
appender.json_rolling_slowlog.policies.size.size = 100MB
appender.json_rolling_slowlog.strategy.type = DefaultRolloverStrategy
appender.json_rolling_slowlog.strategy.max = 30
logger.slowlog.name = slowlog
logger.slowlog.level = trace
logger.slowlog.appenderRef.console_slowlog.ref = ${sys:ls.log.format}_console_slowlog
logger.slowlog.appenderRef.rolling_slowlog.ref = ${sys:ls.log.format}_rolling_slowlog
logger.slowlog.additivity = false
logger.licensereader.name = logstash.licensechecker.licensereader
logger.licensereader.level = error
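A note on the pattern layouts above: %X{pipeline.id} reads the pipeline id from the Log4j2 thread context, and %notEmpty{...} drops the bracketed segment entirely when the key is absent. A minimal Java sketch of that mechanism (illustrative only; Logstash populates the key itself, and the class name and messages here are invented):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.ThreadContext;

    public class PipelineIdDemo {
        private static final Logger LOGGER = LogManager.getLogger(PipelineIdDemo.class);

        public static void main(String[] args) {
            // With the key set, %X{pipeline.id} resolves and the layout renders
            // something like [2020-03-09T16:47:32,000][INFO ][PipelineIdDemo][open_dmp_main] starting
            ThreadContext.put("pipeline.id", "open_dmp_main");
            LOGGER.info("starting");

            // Without it, %notEmpty{} suppresses the whole bracketed segment.
            ThreadContext.remove("pipeline.id");
            LOGGER.info("no pipeline context");
        }
    }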

View File

@@ -0,0 +1,10 @@
---
## Default Logstash configuration from logstash-docker.
## from https://github.com/elastic/logstash-docker/blob/master/build/logstash/config/logstash-oss.yml
#
http.host: "0.0.0.0"
config.reload.automatic: true
config.reload.interval: 300s
path.queue: /usr/share/logstash/queue
path.dead_letter_queue: /usr/share/logstash/dead_letter_queue
xpack.monitoring.elasticsearch.password:

View File

@@ -0,0 +1,18 @@
- pipeline.id: open_dmp_beats
queue.type: persisted
queue.max_bytes: 50mb
dead_letter_queue.enable: true
path.config: "/usr/share/logstash/pipeline/open_dmp_beats.conf"
queue.checkpoint.writes: 32
- pipeline.id: open_dmp_main
queue.type: persisted
queue.max_bytes: 50mb
dead_letter_queue.enable: true
path.config: "/usr/share/logstash/pipeline/open_dmp_main.conf"
queue.checkpoint.writes: 32
- pipeline.id: open_dmp_send_to_elastic
queue.type: persisted
queue.max_bytes: 50mb
dead_letter_queue.enable: true
path.config: "/usr/share/logstash/pipeline/open_dmp_send_to_elastic.conf"
queue.checkpoint.writes: 32
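Each pipeline above gets its own 50mb persisted queue with a checkpoint forced every 32 writes, plus a dead letter queue, so in-flight events survive a Logstash restart up to the last checkpoint. The three stages are chained with the pipeline-to-pipeline input/output plugins, as the pipeline configurations below show: open_dmp_beats receives events, open_dmp_main parses them, and open_dmp_send_to_elastic indexes them.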

View File

@@ -0,0 +1,14 @@
input {
beats {
port => 31312
ssl => false
client_inactivity_timeout => 3000
}
}
filter {
}
output {
pipeline { send_to => open_dmp_main }
}

View File

@@ -0,0 +1,19 @@
input {
pipeline { address => open_dmp_main }
}
filter {
grok {
match => { "message" => "(?<timestamp>%{DATE} %{TIME})%{SPACE}%{LOGLEVEL:level} %{NUMBER:pid} --- \[%{DATA:thread}\] %{DATA:class}%{SPACE}: %{GREEDYDATA:logmessage}" }
}
if "_grokparsefailure" not in [tags] {
mutate
{
remove_field => [ "message" ]
}
}
}
output {
pipeline { send_to => open_dmp_send_to_elastic }
}
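The grok pattern above targets Spring-Boot-style console lines. An invented example of a line it parses (every value is illustrative):

    09-03-20 16:47:32.123  INFO 1 --- [main] e.e.logic.managers.DatasetManager : dataset index updated

On a match the filter extracts timestamp, level, pid, thread, class and logmessage and drops the raw message field; on a miss the event keeps its message and carries the _grokparsefailure tag. Note that %{DATE} matches day-first or month-first dates; a year-first ISO timestamp would call for %{TIMESTAMP_ISO8601} instead.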

View File

@@ -0,0 +1,19 @@
input {
pipeline { address => open_dmp_send_to_elastic }
}
filter {
}
output {
elasticsearch {
hosts => "elasticsearch:9200"
user => elastic
password =>
index =>"opendmp.logs"
#manage_template => true
#template => "/usr/share/logstash/templates/audit/cite_elas_openDMP.json"
#template_name => "cite.elas.openDMP-audit*"
#template_overwrite => true
}
}
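Parsed events land in the opendmp.logs index, authenticated as the elastic user (the password value is intentionally left blank here). A quick way to confirm events are arriving, sketched against the high-level client configured further down in this commit (the class and method names are hypothetical):

    import java.io.IOException;

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.client.core.CountRequest;

    class LogIndexCheck {
        // Counts the log events indexed so far; assumes a RestHighLevelClient
        // built as in the ElasticSearchConfiguration bean below.
        static long logCount(RestHighLevelClient client) throws IOException {
            return client.count(new CountRequest("opendmp.logs"), RequestOptions.DEFAULT).getCount();
        }
    }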

View File

@@ -3,18 +3,73 @@ package eu.eudat.elastic.criteria;
import eu.eudat.elastic.entities.Tag;
import java.util.List;
import java.util.UUID;
/**
* Created by ikalyvas on 7/5/2018.
*/
public class DatasetCriteria extends Criteria {
public List<Tag> tags;
private String label;
private List<UUID> datasetTemplates;
private Short status;
private List<UUID> dmps;
private List<UUID> grants;
private List<UUID> collaborators;
// public List<Tag> tags;
public List<Tag> getTags() {
public String getLabel() {
return label;
}
public void setLabel(String label) {
this.label = label;
}
public List<UUID> getDatasetTemplates() {
return datasetTemplates;
}
public void setDatasetTemplates(List<UUID> datasetTemplates) {
this.datasetTemplates = datasetTemplates;
}
public Short getStatus() {
return status;
}
public void setStatus(Short status) {
this.status = status;
}
public List<UUID> getDmps() {
return dmps;
}
public void setDmps(List<UUID> dmps) {
this.dmps = dmps;
}
public List<UUID> getGrants() {
return grants;
}
public void setGrants(List<UUID> grants) {
this.grants = grants;
}
public List<UUID> getCollaborators() {
return collaborators;
}
public void setCollaborators(List<UUID> collaborators) {
this.collaborators = collaborators;
}
/*public List<Tag> getTags() {
return tags;
}
public void setTags(List<Tag> tags) {
this.tags = tags;
}
}*/
}
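Callers populate only the criteria they need; unset fields are skipped when the repository builds its query. A hypothetical usage sketch (class and method names are invented):

    import java.util.Collections;
    import java.util.UUID;

    class CriteriaExample {
        static DatasetCriteria finishedUnderGrant(UUID grantId) {
            // Finished datasets (status 1) under a single grant; every
            // unset criteria field is ignored by DatasetRepository.query
            DatasetCriteria criteria = new DatasetCriteria();
            criteria.setStatus((short) 1);
            criteria.setGrants(Collections.singletonList(grantId));
            return criteria;
        }
    }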

View File

@@ -0,0 +1,42 @@
package eu.eudat.elastic.entities;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Map;
import java.util.UUID;
public class Collaborator implements ElasticEntity<Collaborator> {
private UUID id;
private String name;
public UUID getId() {
return id;
}
public void setId(UUID id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public XContentBuilder toElasticEntity(XContentBuilder builder) throws IOException {
builder.startObject();
builder.field("id", this.id.toString());
builder.field("name", this.name);
builder.endObject();
return builder;
}
@Override
public Collaborator fromElasticEntity(Map<String, Object> fields) {
return null;
}
}
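toElasticEntity writes the collaborator as a small JSON object, while fromElasticEntity is left unimplemented (it returns null); the Dataset entity below reads collaborators back as raw maps instead. A sketch of the serialization (the demo class and name value are invented):

    import java.util.UUID;

    import org.elasticsearch.common.Strings;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    class CollaboratorJsonDemo {
        public static void main(String[] args) throws Exception {
            Collaborator collaborator = new Collaborator();
            collaborator.setId(UUID.randomUUID());
            collaborator.setName("Jane Researcher");
            XContentBuilder builder = XContentFactory.jsonBuilder();
            collaborator.toElasticEntity(builder);
            // Prints {"id":"<uuid>","name":"Jane Researcher"}
            System.out.println(Strings.toString(builder));
        }
    }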

View File

@@ -8,6 +8,8 @@ import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;
/**
* Created by ikalyvas on 7/5/2018.
@@ -16,7 +18,13 @@ public class Dataset implements ElasticEntity<Dataset> {
private static final Logger logger = LoggerFactory.getLogger(Dataset.class);
private String id;
private List<Tag> tags = new LinkedList<>();
//private List<Tag> tags = new LinkedList<>();
private String label;
private UUID template;
private Short status;
private UUID dmp;
private UUID grant;
private List<Collaborator> collaborators;
public String getId() {
return id;
@@ -26,19 +34,82 @@ public class Dataset implements ElasticEntity<Dataset> {
this.id = id;
}
public List<Tag> getTags() {
/*public List<Tag> getTags() {
return tags;
}
public void setTags(List<Tag> tags) {
this.tags = tags;
}*/
public String getLabel() {
return label;
}
public void setLabel(String label) {
this.label = label;
}
public UUID getTemplate() {
return template;
}
public void setTemplate(UUID template) {
this.template = template;
}
public Short getStatus() {
return status;
}
public void setStatus(Short status) {
this.status = status;
}
public UUID getDmp() {
return dmp;
}
public void setDmp(UUID dmp) {
this.dmp = dmp;
}
public UUID getGrant() {
return grant;
}
public void setGrant(UUID grant) {
this.grant = grant;
}
public List<Collaborator> getCollaborators() {
return collaborators;
}
public void setCollaborators(List<Collaborator> collaborators) {
this.collaborators = collaborators;
}
@Override
public XContentBuilder toElasticEntity(XContentBuilder builder) throws IOException {
builder.startObject();
builder.field("id", this.id);
builder.startArray("tags");
builder.field("label", this.label);
builder.field("template", this.template.toString());
builder.field("status", this.status.toString());
builder.field("dmp", this.dmp.toString());
builder.field("grant", this.grant.toString());
builder.startArray("collaborators");
this.collaborators.forEach(x -> {
try {
x.toElasticEntity(builder);
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
});
builder.endArray();
/*builder.startArray("tags");
this.tags.forEach(x -> {
try {
x.toElasticEntity(builder);
@@ -46,7 +117,7 @@ public class Dataset implements ElasticEntity<Dataset> {
logger.error(e.getMessage(), e);
}
});
builder.endArray();
builder.endArray();*/
builder.endObject();
return builder;
}
@@ -55,7 +126,13 @@ public class Dataset implements ElasticEntity<Dataset> {
public Dataset fromElasticEntity(Map<String, Object> fields) {
if (fields != null) {
this.id = (String) fields.get("id");
this.tags = ((List<Tag>) fields.get("tags"));
// this.tags = ((List<Tag>) fields.get("tags"));
this.label = (String) fields.get("label");
this.template = UUID.fromString((String) fields.get("template"));
this.status = Short.valueOf((String) fields.get("status"));
this.dmp = UUID.fromString((String) fields.get("dmp"));
this.grant = UUID.fromString((String) fields.get("grant"));
this.collaborators = ((List<Collaborator>) fields.get("collaborators"));
}
return this;
}
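fromElasticEntity assumes every field is present in the source map: UUID.fromString and Short.valueOf throw on a missing template, status, dmp or grant, and the unchecked cast on collaborators leaves Map elements rather than Collaborator instances at runtime (Collaborator.fromElasticEntity is not wired in). Null-tolerant reads would look like this sketch (helper names are hypothetical):

    import java.util.Map;
    import java.util.UUID;

    class SafeFieldReads {
        // Return null for absent fields instead of throwing
        static UUID uuidField(Map<String, Object> fields, String name) {
            Object value = fields.get(name);
            return value != null ? UUID.fromString(value.toString()) : null;
        }

        static Short shortField(Map<String, Object> fields, String name) {
            Object value = fields.get(name);
            return value != null ? Short.valueOf(value.toString()) : null;
        }
    }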

View File

@@ -2,13 +2,15 @@ package eu.eudat.elastic.repository;
import eu.eudat.elastic.criteria.DatasetCriteria;
import eu.eudat.elastic.entities.Dataset;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.client.core.CountRequest;
import org.elasticsearch.client.core.CountResponse;
import org.elasticsearch.client.indices.GetIndexRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -21,12 +23,10 @@ import org.springframework.stereotype.Service;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
/**
* Created by ikalyvas on 7/5/2018.
*/
@Service("datasetRepository")
public class DatasetRepository extends ElasticRepository<Dataset, DatasetCriteria> {
@@ -38,35 +38,70 @@ public class DatasetRepository extends ElasticRepository<Dataset, DatasetCriteria> {
@Override
public Dataset createOrUpdate(Dataset entity) throws IOException {
XContentBuilder builder = XContentFactory.jsonBuilder();
IndexRequest request = new IndexRequest("datasets", "doc", entity.getId()).source(entity.toElasticEntity(builder));
this.getClient().index(request);
IndexRequest request = new IndexRequest("datasets").id(entity.getId()).source(entity.toElasticEntity(builder));//new IndexRequest("datasets", "doc", entity.getId()).source(entity.toElasticEntity(builder));
this.getClient().index(request, RequestOptions.DEFAULT);
return entity;
}
@Override
public Dataset findDocument(String id) throws IOException {
GetRequest request = new GetRequest("datasets","doc",id);
GetResponse response = this.getClient().get(request);
GetRequest request = new GetRequest("datasets",id);
GetResponse response = this.getClient().get(request, RequestOptions.DEFAULT);
return new Dataset().fromElasticEntity(response.getSourceAsMap());
}
@Override
public List<Dataset> query(DatasetCriteria criteria) throws ExecutionException, InterruptedException, IOException {
SearchRequest searchRequest = new SearchRequest();
SearchRequest searchRequest = new SearchRequest("datasets");
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
BoolQueryBuilder boolQuery = QueryBuilders.boolQuery()
.should(QueryBuilders.termsQuery("tags.name.keyword", criteria.getTags().stream().map(x -> x.getName()).collect(Collectors.toList())));
CountRequest countRequest = new CountRequest("datasets");
countRequest.query(QueryBuilders.matchAllQuery());
CountResponse countResponse = getClient().count(countRequest, RequestOptions.DEFAULT);
Long count = countResponse.getCount();
searchSourceBuilder.size(count.intValue());
BoolQueryBuilder boolQuery = QueryBuilders.boolQuery();
if (criteria.getLabel() != null && !criteria.getLabel().isEmpty()) {
boolQuery = boolQuery.should(QueryBuilders.matchPhrasePrefixQuery("label", criteria.getLabel()));
}
if (criteria.getDatasetTemplates() != null && criteria.getDatasetTemplates().size() > 0) {
boolQuery = boolQuery.should(QueryBuilders.termsQuery("template.keyword", criteria.getDatasetTemplates().stream().map(UUID::toString).collect(Collectors.toList())));
}
if (criteria.getStatus() != null) {
boolQuery = boolQuery.should(QueryBuilders.termQuery("status.keyword", criteria.getStatus().toString()));
}
if (criteria.getDmps() != null && criteria.getDmps().size() > 0) {
boolQuery = boolQuery.should(QueryBuilders.termsQuery("dmp.keyword", criteria.getDmps().stream().map(UUID::toString).collect(Collectors.toList())));
}
if (criteria.getGrants() != null && criteria.getGrants().size() > 0) {
boolQuery = boolQuery.should(QueryBuilders.termsQuery("grant.keyword", criteria.getGrants().stream().map(UUID::toString).collect(Collectors.toList())));
}
if (criteria.getCollaborators() != null && criteria.getCollaborators().size() > 0) {
boolQuery = boolQuery.should(QueryBuilders.termsQuery("collaborators.id.keyword", criteria.getCollaborators().stream().map(UUID::toString).collect(Collectors.toList())));
}
if (boolQuery.should().isEmpty()) {
boolQuery.should(QueryBuilders.matchAllQuery());
} else {
boolQuery.minimumShouldMatch(boolQuery.should().size());
}
searchSourceBuilder.query(boolQuery);
searchRequest.source(searchSourceBuilder);
SearchResponse response = this.getClient().search(searchRequest);
SearchResponse response = this.getClient().search(searchRequest, RequestOptions.DEFAULT);
return Arrays.stream(response.getHits().getHits()).map(x -> this.transformFromString(x.getSourceAsString(), Dataset.class)).collect(Collectors.toList());
}
@Override
public boolean exists() throws IOException {
GetIndexRequest request = new GetIndexRequest();
request.indices("datasets");
return this.getClient().indices().exists(request);
GetIndexRequest request = new GetIndexRequest("datasets");
// request.indices("datasets");
return this.getClient().indices().exists(request, RequestOptions.DEFAULT);
}
}
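Two details of the query method are worth spelling out. The count request sizes the search to the total number of documents so a single page returns everything; with Elasticsearch 7 defaults this stops working once the index passes index.max_result_window (10,000 documents). And every criterion is added as a should clause with minimumShouldMatch set to the clause count, which makes all of them mandatory, the same effect as building with must, e.g. (sketch, one clause shown):

    import org.elasticsearch.index.query.BoolQueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    class MandatoryClauseSketch {
        // must == should + minimumShouldMatch(should.size()):
        // every added clause has to match.
        static BoolQueryBuilder statusQuery(Short status) {
            BoolQueryBuilder boolQuery = QueryBuilders.boolQuery();
            if (status != null) {
                boolQuery.must(QueryBuilders.termQuery("status.keyword", status.toString()));
            }
            return boolQuery;
        }
    }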

View File

@@ -70,13 +70,13 @@
<dependency>
<groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch</artifactId>
<version>6.3.1</version>
<version>7.6.0</version>
</dependency>
<dependency>
<groupId>org.elasticsearch.client</groupId>
<artifactId>elasticsearch-rest-high-level-client</artifactId>
<version>6.3.1</version>
<version>7.6.0</version>
</dependency>
<dependency>
<groupId>org.hibernate</groupId>
@@ -188,7 +188,13 @@
<dependency>
<groupId>org.elasticsearch.client</groupId>
<artifactId>transport</artifactId>
<version>6.3.0</version>
<version>7.6.0</version>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-to-slf4j</artifactId>
<version>2.8.2</version>
</dependency>
</dependencies>
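The three Elasticsearch artifacts now move in lockstep to 7.6.0 (the transport client had been on 6.3.0, out of step with its 6.3.1 siblings), and the new log4j-to-slf4j bridge routes the client's Log4j2 calls into the application's SLF4J logging.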

View File

@@ -1,20 +1,17 @@
package eu.eudat.configurations;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Client;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;
import java.net.InetAddress;
/**
* Created by ikalyvas on 7/5/2018.
*/
@@ -28,12 +25,18 @@ public class ElasticSearchConfiguration {
this.environment = environment;
}
@Bean
@Bean(destroyMethod = "close")
public RestHighLevelClient client() throws Exception {
final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
credentialsProvider.setCredentials(AuthScope.ANY,
new UsernamePasswordCredentials(this.environment.getProperty("elasticsearch.username"), this.environment.getProperty("elasticsearch.password")));
RestHighLevelClient client = new RestHighLevelClient(
RestClient.builder(
new HttpHost(this.environment.getProperty("elasticsearch.host"),
Integer.parseInt(this.environment.getProperty("elasticsearch.port")), "http")));
Integer.parseInt(this.environment.getProperty("elasticsearch.port")), "http"))
.setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder
.setDefaultCredentialsProvider(credentialsProvider)));
return client;
}
}
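Switching the bean to destroyMethod = "close" lets Spring shut the client's underlying connections down with the context, and the credentials provider attaches the elasticsearch.username/elasticsearch.password properties (see the property files below) to every request. Consumers just inject the bean, e.g. (hypothetical consumer class):

    import org.elasticsearch.client.RestHighLevelClient;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.stereotype.Component;

    @Component
    class ElasticClientConsumer {
        @Autowired
        private RestHighLevelClient client; // closed by Spring on shutdown
    }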

View File

@@ -13,6 +13,7 @@ import eu.eudat.data.query.items.table.dataset.DatasetPublicTableRequest;
import eu.eudat.data.query.items.table.dataset.DatasetTableRequest;
import eu.eudat.data.query.items.table.datasetprofile.DatasetProfileTableRequestItem;
import eu.eudat.elastic.criteria.DatasetCriteria;
import eu.eudat.elastic.entities.Collaborator;
import eu.eudat.elastic.repository.DatasetRepository;
import eu.eudat.exceptions.security.UnauthorisedException;
import eu.eudat.logic.builders.BuilderFactory;
@@ -48,6 +49,7 @@ import org.springframework.core.env.Environment;
import org.springframework.core.io.FileSystemResource;
import org.springframework.http.*;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.web.client.RestTemplate;
import org.springframework.web.multipart.MultipartFile;
@@ -104,29 +106,44 @@ public class DatasetManager {
public DataTableData<DatasetListingModel> getPaged(DatasetTableRequest datasetTableRequest, Principal principal) throws Exception {
DatasetCriteria datasetCriteria = new DatasetCriteria();
datasetCriteria.setTags(datasetTableRequest.getCriteria().getTags());
datasetCriteria.setLabel(datasetTableRequest.getCriteria().getLike());
datasetCriteria.setDatasetTemplates(datasetTableRequest.getCriteria().getDatasetTemplates());
if (datasetTableRequest.getCriteria().getStatus() != null) {
datasetCriteria.setStatus(datasetTableRequest.getCriteria().getStatus().shortValue());
}
datasetCriteria.setDmps(datasetTableRequest.getCriteria().getGroupIds());
datasetCriteria.setGrants(datasetTableRequest.getCriteria().getGrants());
datasetCriteria.setCollaborators(datasetTableRequest.getCriteria().getCollaborators());
List<eu.eudat.elastic.entities.Dataset> datasets;
try {
datasets = datasetCriteria.getTags() != null && datasetCriteria.getTags().size() > 0 && datasetRepository.exists() ?
datasetRepository.query(datasetCriteria) : new LinkedList<>();
datasets = datasetRepository.exists() ?
datasetRepository.query(datasetCriteria) : null;
} catch (Exception ex) {
logger.warn(ex.getMessage());
datasets = null;
}
UserInfo userInfo = builderFactory.getBuilder(UserInfoBuilder.class).id(principal.getId()).build();
QueryableList<eu.eudat.data.entities.Dataset> items = databaseRepository.getDatasetDao().getWithCriteria(datasetTableRequest.getCriteria()).withHint(HintedModelFactory.getHint(DatasetListingModel.class));
if (datasets != null && datasetTableRequest.getCriteria().getTags() != null && !datasetTableRequest.getCriteria().getTags().isEmpty()) {
// QueryableList<eu.eudat.data.entities.Dataset> items = databaseRepository.getDatasetDao().getWithCriteria(datasetTableRequest.getCriteria()).withHint(HintedModelFactory.getHint(DatasetListingModel.class));
QueryableList<eu.eudat.data.entities.Dataset> items;
if (datasets != null) {
if (!datasets.isEmpty()) {
items = databaseRepository.getDatasetDao().asQueryable().withHint(HintedModelFactory.getHint(DatasetListingModel.class));
List<eu.eudat.elastic.entities.Dataset> finalDatasets = datasets;
items.where((builder, root) -> root.get("id").in(finalDatasets.stream().map(x -> UUID.fromString(x.getId())).collect(Collectors.toList())));
} else
} else {
items = databaseRepository.getDatasetDao().getWithCriteria(datasetTableRequest.getCriteria()).withHint(HintedModelFactory.getHint(DatasetListingModel.class));
items.where((builder, root) -> root.get("id").in(new UUID[]{UUID.randomUUID()}));
}
} else {
items = databaseRepository.getDatasetDao().getWithCriteria(datasetTableRequest.getCriteria()).withHint(HintedModelFactory.getHint(DatasetListingModel.class));
}
List<Integer> roles = new LinkedList<>();
if (datasetTableRequest.getCriteria().getRole() != null) roles.add(datasetTableRequest.getCriteria().getRole());
QueryableList<eu.eudat.data.entities.Dataset> authItems = databaseRepository.getDatasetDao().getAuthenticated(items, userInfo, roles);
QueryableList<eu.eudat.data.entities.Dataset> pagedItems = PaginationManager.applyPaging(authItems, datasetTableRequest);
DataTableData<DatasetListingModel> dataTable = new DataTableData<DatasetListingModel>();
DataTableData<DatasetListingModel> dataTable = new DataTableData<>();
CompletableFuture<List<DatasetListingModel>> itemsFuture = pagedItems.
@@ -144,23 +161,32 @@ public class DatasetManager {
public DataTableData<DatasetListingModel> getPaged(DatasetPublicTableRequest datasetTableRequest, Principal principal) throws Exception {
DatasetCriteria datasetCriteria = new DatasetCriteria();
datasetCriteria.setTags(datasetTableRequest.getCriteria().getTags());
datasetCriteria.setLabel(datasetTableRequest.getCriteria().getLike());
datasetCriteria.setDatasetTemplates(datasetTableRequest.getCriteria().getDatasetProfile());
datasetCriteria.setDmps(datasetTableRequest.getCriteria().getDmpIds());
datasetCriteria.setGrants(datasetTableRequest.getCriteria().getGrants());
List<eu.eudat.elastic.entities.Dataset> datasets;
try {
datasets = datasetCriteria.getTags() != null && datasetCriteria.getTags().size() > 0 && datasetRepository.exists() ?
datasets = datasetRepository.exists() ?
datasetRepository.query(datasetCriteria) : new LinkedList<>();
} catch (Exception ex) {
logger.warn(ex.getMessage());
datasets = null;
}
datasetTableRequest.setQuery(databaseRepository.getDatasetDao().asQueryable().withHint(HintedModelFactory.getHint(DatasetListingModel.class)));
QueryableList<Dataset> items = datasetTableRequest.applyCriteria();
if (datasets != null && datasetTableRequest.getCriteria().getTags() != null && !datasetTableRequest.getCriteria().getTags().isEmpty()) {
/*QueryableList<Dataset> items;
if (datasets != null) {
if (!datasets.isEmpty()) {
items = databaseRepository.getDatasetDao().asQueryable().withHint(HintedModelFactory.getHint(DatasetListingModel.class));
List<eu.eudat.elastic.entities.Dataset> finalDatasets = datasets;
items.where((builder, root) -> root.get("id").in(finalDatasets.stream().map(x -> UUID.fromString(x.getId())).collect(Collectors.toList())));
} else
items = datasetTableRequest.applyCriteria();
items.where((builder, root) -> root.get("id").in(new UUID[]{UUID.randomUUID()}));
}
} else {
items = datasetTableRequest.applyCriteria();
}*/
if (principal.getId() != null && datasetTableRequest.getCriteria().getRole() != null) {
items.where((builder, root) -> {
@@ -199,7 +225,8 @@ public class DatasetManager {
datasetElastic = datasetRepository.exists() ?
datasetRepository.findDocument(id) : new eu.eudat.elastic.entities.Dataset();
} catch (Exception ex) {
datasetElastic = new eu.eudat.elastic.entities.Dataset();
logger.warn(ex.getMessage());
datasetElastic = null;
}
dataset.setDatasetProfileDefinition(getPagedProfile(dataset, datasetEntity));
dataset.fromDataModel(datasetEntity);
@@ -236,7 +263,10 @@ public class DatasetManager {
boolean latestVersion = profile.getVersion().toString().equals(datasetEntity.getProfile().getVersion().toString());
dataset.setIsProfileLatestVersion(latestVersion);
dataset.setTags(datasetElastic.getTags());
//dataset.setTags(datasetElastic.getTags());
if (datasetElastic != null && datasetElastic.getLabel() != null && !datasetElastic.getLabel().isEmpty()) {
dataset.setLabel(datasetElastic.getLabel());
}
return dataset;
}
@@ -519,12 +549,23 @@ public class DatasetManager {
}
private void updateTags(DatasetRepository datasetRepository, DatasetWizardModel datasetWizardModel) throws IOException {
if (datasetWizardModel.getTags() != null && !datasetWizardModel.getTags().isEmpty()) {
// if (datasetWizardModel.getTags() != null && !datasetWizardModel.getTags().isEmpty()) {
eu.eudat.elastic.entities.Dataset dataset = new eu.eudat.elastic.entities.Dataset();
dataset.setId(datasetWizardModel.getId().toString());
dataset.setTags(datasetWizardModel.getTags());
// dataset.setTags(datasetWizardModel.getTags());
dataset.setLabel(datasetWizardModel.getLabel());
dataset.setTemplate(datasetWizardModel.getProfile());
dataset.setStatus(datasetWizardModel.getStatus());
dataset.setDmp(datasetWizardModel.getDmp().getGroupId());
dataset.setGrant(datasetWizardModel.getDmp().getGrant().getId());
dataset.setCollaborators(datasetWizardModel.getDmp().getUsers().stream().map(user -> {
Collaborator collaborator = new Collaborator();
collaborator.setId(user.getId());
collaborator.setName(user.getName());
return collaborator;
}).collect(Collectors.toList()));
datasetRepository.createOrUpdate(dataset);
}
// }
}
private void createRegistriesIfTheyDontExist(RegistryDao registryDao, eu.eudat.data.entities.Dataset dataset) {
@@ -709,7 +750,8 @@ public class DatasetManager {
datasetElastic = datasetRepository.exists() ?
datasetRepository.findDocument(id) : new eu.eudat.elastic.entities.Dataset();
} catch (Exception ex) {
datasetElastic = new eu.eudat.elastic.entities.Dataset();
logger.warn(ex.getMessage());
datasetElastic = null;
}
dataset.setDatasetProfileDefinition(getPagedProfile(dataset, datasetEntity));
dataset.fromDataModel(datasetEntity);
@@ -731,7 +773,10 @@ public class DatasetManager {
// Now at latest version.
dataset.setIsProfileLatestVersion(true);
dataset.setTags(datasetElastic.getTags());
//dataset.setTags(datasetElastic.getTags());
if (datasetElastic != null && datasetElastic.getLabel() != null && !datasetElastic.getLabel().isEmpty()) {
dataset.setLabel(datasetElastic.getLabel());
}
return dataset;
}
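The pattern running through these DatasetManager changes: Elasticsearch is now consulted first, and the relational query adapts to what it returns. A non-empty hit list narrows the database query to the returned ids; an empty list short-circuits to an impossible id filter (in(UUID.randomUUID())) so the page comes back empty; a null result, meaning the index is missing or the cluster is unreachable, falls back to plain database criteria. Search therefore degrades to database-only filtering instead of failing when Elastic is down, which is also why the catch blocks above now log the warning and set datasetElastic to null rather than substituting an empty entity.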

View File

@@ -7,6 +7,8 @@ database.password=
####################ELASTIC SEARCH TAGS OVERRIDES CONFIGURATIONS##########
elasticsearch.host = localhost
elasticsearch.port = 9200
elasticsearch.username=elastic
elasticsearch.password=
####################ELK OVERRIDES CONFIGURATIONS##########
http-logger.server-address = http://localhost:31311

View File

@@ -8,6 +8,8 @@ database.password=
####################ELASTIC SEARCH TAGS OVERRIDES CONFIGURATIONS##########
elasticsearch.host = tags-elastic-search
elasticsearch.port = 9200
elasticsearch.username=elastic
elasticsearch.password=
####################ELK OVERRIDES CONFIGURATIONS##########
http-logger.server-address = http://logstash:31311

View File

@@ -8,6 +8,8 @@ database.password=
####################ELASTIC SEARCH TAGS OVERRIDES CONFIGURATIONS##########
elasticsearch.host = tags-elastic-search
elasticsearch.port = 9200
elasticsearch.username=elastic
elasticsearch.password=
####################ELK OVERRIDES CONFIGURATIONS##########
http-logger.server-address = http://logstash:31311