# Detailed Elasticsearch configuration profile

################################### Cluster ###################################
# Define the cluster name; the default is elasticsearch.
cluster.name: elasticsearch
##################################### Node ####################################
# Define this node's name.
node.name: "elk001"
# Whether this node is master-eligible. The master coordinates the cluster and
# maintains cluster state; reads are served by the cluster's nodes together,
# but only the master can modify cluster state.
# node.master: true
# Whether this node is a data node, i.e. it stores index data (shards).
# node.data: true
# node.rack: rack314
# Maximum number of nodes allowed to run from a single data path on this
# machine; usually 1, because normally only one node runs per machine.
# node.max_local_storage_nodes: 1
#################################### Index ####################################
# Define the number of shards per index.
# index.number_of_shards: 5
# Define the number of replicas per shard.
# index.number_of_replicas: 1
#################################### Paths ####################################
# Path to the directory containing the configuration files.
# path.conf: /path/to/conf
# Path to the directory where index data is stored.
# path.data: /path/to/data
# path.data: /path/to/data1,/path/to/data2

# Path to the directory for temporary files.
# path.work: /path/to/work

# Path to log files:
# path.logs: /path/to/logs

# Path to where plugins are installed.
# path.plugins: /path/to/plugins

################################### Plugin ####################################
# List plugins by name that this node requires; if a listed plugin is not
# installed, the node will not start. By default no plugin is mandatory.
# plugin.mandatory: mapper-attachments, lang-groovy
################################### Memory ####################################
# When the JVM starts swapping because memory runs low, Elasticsearch
# performance degrades badly. To avoid this, set this to true so that
# Elasticsearch locks the memory it uses.
# bootstrap.mlockall: true
############################## Network And HTTP ###############################
# Address this Elasticsearch node binds to.
# network.bind_host: 192.168.0.1
# Address this node advertises for communication with other nodes; if not
# set, it is determined automatically.
# network.publish_host: 192.168.0.1

# Set both 'bind_host' and 'publish_host':
#
# network.host: 192.168.0.1
# Port used for communication between nodes.
# transport.tcp.port: 9300
# Define whether data is compressed during tcp transport.
# transport.tcp.compress: true
# Define the listening port for http transport.
# http.port: 9200
# Set the maximum content length for http interactions.
# http.max_content_length: 100mb
# Whether the http protocol is enabled; set to false if you do not want
# Elasticsearch to expose http.
# http.enabled: false

################################### Gateway ###################################
# Elasticsearch's underlying persistence; defaults to local disk, but can
# also be set to aws s3.
# gateway.type: local
# Controls how many nodes must have joined the cluster before data recovery
# starts. This avoids incomplete shard allocation caused by nodes discovering
# each other gradually: if the cluster has 5 nodes in total, setting this to 5
# means recovery only starts once all 5 nodes are up, while setting it to 3
# risks the remaining two nodes receiving no shards.
# gateway.recover_after_nodes: 1
# Timeout for initial data recovery: once gateway.recover_after_nodes is
# satisfied, wait this long before starting recovery anyway.
# gateway.recover_after_time: 5m

# Set how many nodes are expected in this cluster. Once these N nodes
# are up (and recover_after_nodes is met), the recovery process begins
# immediately (without waiting for recover_after_time to expire). For example,
# with this set to 5, once all 5 nodes are started, data recovery begins
# without waiting for the gateway.recover_after_time delay.
# gateway.expected_nodes: 2
############################ Recovery Throttling ##############################
# Set the number of concurrent recoveries per node:
# cluster.routing.allocation.node_initial_primaries_recoveries: 4
# cluster.routing.allocation.node_concurrent_recoveries: 2
# Set to throttle throughput when recovering (e.g. 100mb; by default 20mb):
# Bandwidth limit while restoring data; 0 means unlimited.
# indices.recovery.max_bytes_per_sec: 20mb

# Maximum number of concurrent streams opened when recovering data from
# other shards.
# indices.recovery.concurrent_streams: 5

################################## Discovery ##################################
# Set how many master-eligible nodes must be visible for a master election;
# for a large cluster the official recommendation is 2-4.
# discovery.zen.minimum_master_nodes: 1
# Timeout for automatically discovering other cluster nodes; if network
# latency is high, set it a little longer to prevent false failure detection.
# discovery.zen.ping.timeout: 3s

# Whether the multicast discovery protocol is enabled.
# discovery.zen.ping.multicast.enabled: false
# Set the initial list of master-eligible hosts; these machines are probed
# when the node joins the cluster via unicast discovery.
# discovery.zen.ping.unicast.hosts: ["host1", "host2:port"]

################################### Slow Log ##################################

# Shard Level Query and FETCH threshold logging.

#index.search.slowlog.threshold.query.warn: 10s
#index.search.slowlog.threshold.query.info: 5s
#index.search.slowlog.threshold.query.debug: 2s
#index.search.slowlog.threshold.query.trace: 500ms

#index.search.slowlog.threshold.fetch.warn: 1s
#index.search.slowlog.threshold.fetch.info: 800ms
#index.search.slowlog.threshold.fetch.debug: 500ms
#index.search.slowlog.threshold.fetch.trace: 200ms

#index.indexing.slowlog.threshold.index.warn: 10s
#index.indexing.slowlog.threshold.index.info: 5s
#index.indexing.slowlog.threshold.index.debug: 2s
#index.indexing.slowlog.threshold.index.trace: 500ms

################################## GC Logging ################################

#monitor.jvm.gc.young.warn: 1000ms
#monitor.jvm.gc.young.info: 700ms
#monitor.jvm.gc.young.debug: 400ms

#monitor.jvm.gc.old.warn: 10s
#monitor.jvm.gc.old.info: 5s
#monitor.jvm.gc.old.debug: 2s

################################## Security ################################

# Uncomment if you want to enable JSONP as a valid return transport on the
# http server. With this enabled, it may pose a security risk, so disabling
# it unless you need it is recommended (it is disabled by default).
#
#http.jsonp.enable: true

 

# Source: www.cnblogs.com/ssqq5200936/p/10955259.html