{# hadoop-ansible/templates/datanode-hdfs-site.xml.j2 - 191 lines, 5.6 KiB,
   Django/Jinja template.
   This header is kept as a Jinja comment (with trailing whitespace control)
   so the rendered file begins with the XML declaration: an XML parser
   rejects any content - even whitespace - before the declaration. -#}
<?xml version="1.0" encoding="UTF-8"?>
<!-- DataNode hdfs-site.xml, rendered by Ansible from Jinja2 variables.
     This first section declares the HA nameservice and the RPC/HTTP
     endpoints of the two NameNodes that back it. -->
<configuration>
<!-- Logical name of the HA cluster; DataNodes and clients address the
     NameNode pair through this single nameservice ID. -->
<property>
<name>dfs.nameservices</name>
<value>{{ hdfs_cluster_id }}</value>
</property>
<!-- NameNode IDs within the nameservice. NOTE(review): presumably a
     comma-separated pair matching hdfs_cluster_nn_id_1/_2 below - confirm
     against the Ansible group_vars. -->
<property>
<name>dfs.ha.namenodes.{{ hdfs_cluster_id }}</name>
<value>{{ hdfs_cluster_ids }}</value>
</property>
<!-- Per-NameNode client RPC endpoints; both NameNodes share one RPC port. -->
<property>
<name>dfs.namenode.rpc-address.{{ hdfs_cluster_id }}.{{ hdfs_cluster_nn_id_1 }}</name>
<value>{{ hdfs_namenode_1_hostname }}:{{ hdfs_nn_rpc_port }}</value>
</property>
<property>
<name>dfs.namenode.rpc-address.{{ hdfs_cluster_id }}.{{ hdfs_cluster_nn_id_2 }}</name>
<value>{{ hdfs_namenode_2_hostname }}:{{ hdfs_nn_rpc_port }}</value>
</property>
<!-- Per-NameNode web UI endpoints; both NameNodes share one HTTP port. -->
<property>
<name>dfs.namenode.http-address.{{ hdfs_cluster_id }}.{{ hdfs_cluster_nn_id_1 }}</name>
<value>{{ hdfs_namenode_1_hostname }}:{{ hdfs_nn_http_port }}</value>
</property>
<property>
<name>dfs.namenode.http-address.{{ hdfs_cluster_id }}.{{ hdfs_cluster_nn_id_2 }}</name>
<value>{{ hdfs_namenode_2_hostname }}:{{ hdfs_nn_http_port }}</value>
</property>
<!-- Shared edit log: quorum journal spanning five JournalNodes. Hosts are
     ';'-separated per the qjournal:// URI syntax; a write must reach a
     majority (3 of 5) of the journals. -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://{{ hdfs_journal_0 }}:{{ hdfs_journal_port }};{{ hdfs_journal_1 }}:{{ hdfs_journal_port }};{{ hdfs_journal_2 }}:{{ hdfs_journal_port }};{{ hdfs_journal_3 }}:{{ hdfs_journal_port }};{{ hdfs_journal_4 }}:{{ hdfs_journal_port }}/{{ hdfs_journal_id }}</value>
</property>
<!-- Local directory used if this host also runs a JournalNode daemon. -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>{{ hdfs_data_dir }}/{{ hdfs_journal_data_dir }}</value>
</property>
<!-- Client-side class that discovers which NameNode is currently active. -->
<property>
<name>dfs.client.failover.proxy.provider.{{ hdfs_cluster_id }}</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Automatic NameNode failover. NOTE(review): this requires ZKFC daemons
     and ha.zookeeper.quorum configured elsewhere (core-site / NN config) -
     not visible from this template; confirm the rest of the role sets it. -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- Group whose members are treated as HDFS superusers. -->
<property>
<name>dfs.permissions.superusergroup</name>
<value>supergroup</value>
</property>
<!-- Default replication factor for new files (templated per environment). -->
<property>
<name>dfs.replication</name>
<value>{{ hdfs_replication }}</value>
</property>
<!-- Minimum replicas that must be written for a block write to succeed. -->
<property>
<name>dfs.namenode.replication.min</name>
<value>1</value>
</property>
<!-- Hard upper bound a client may request for a file's replication. -->
<property>
<name>dfs.replication.max</name>
<value>{{ hdfs_repl_max }}</value>
</property>
{% if hdfs_read_shortcircuit %}
<!-- Short-circuit local reads: clients co-located with a DataNode read
     block files directly via a Unix domain socket, bypassing the DataNode
     TCP path. This whole section is emitted only when the Ansible var
     hdfs_read_shortcircuit is truthy. -->
<property>
<name>dfs.client.read.shortcircuit</name>
<value>true</value>
</property>
<!-- Max file descriptors cached by the client's short-circuit stream cache. -->
<property>
<name>dfs.client.read.shortcircuit.streams.cache.size</name>
<value>{{ hdfs_read_shortcircuit_cache_size }}</value>
</property>
<!-- Expiry (ms) for entries in that stream cache. -->
<property>
<name>dfs.client.read.shortcircuit.streams.cache.expiry.ms</name>
<value>{{ hdfs_read_shortcircuit_cache_expiry }}</value>
</property>
<!-- Domain socket path shared by DataNode and clients; per Hadoop docs the
     literal _PORT token is replaced with the DataNode's TCP port. The
     parent directory must exist and be root-owned for the DataNode to
     accept it. -->
<property>
<name>dfs.domain.socket.path</name>
<value>{{ hdfs_read_shortcircuit_cache_dir }}/dn._PORT</value>
</property>
{% endif %}
<!-- Default block size (bytes) for new files. -->
<property>
<name>dfs.blocksize</name>
<value>{{ hdfs_block_size }}</value>
</property>
<!-- Client/DataNode socket read timeout (ms). NOTE(review): in Hadoop 2.x+
     dfs.socket.timeout is a deprecated alias of dfs.client.socket-timeout;
     it still works via the deprecation map but consider renaming. -->
<property>
<name>dfs.socket.timeout</name>
<value>{{ hdfs_dfs_socket_timeout }}</value>
</property>
<!-- DataNode socket write timeout (ms). -->
<property>
<name>dfs.datanode.socket.write.timeout</name>
<value>{{ hdfs_dfs_socket_write_timeout }}</value>
</property>
<!-- Enable the WebHDFS REST API on NameNode/DataNode web ports. -->
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<!-- Enforce HDFS file permissions. NOTE(review): dfs.permissions is the
     pre-2.x name, deprecated in favor of dfs.permissions.enabled; still
     honored through the deprecation map but consider renaming. -->
<property>
<name>dfs.permissions</name>
<value>true</value>
</property>
<!-- Clients connect to DataNodes by IP (hostname resolution disabled). -->
<property>
<name>dfs.client.use.datanode.hostname</name>
<value>false</value>
</property>
<!-- Umask applied to files/directories created through HDFS. -->
<property>
<name>fs.permissions.umask-mode</name>
<value>022</value>
</property>
<!-- Wire encryption for block data transfer is off. NOTE(review): the
     algorithm below (rc4) is only consulted when encryption is enabled;
     rc4 is cryptographically weak - revisit before ever flipping the
     flag to true. -->
<property>
<name>dfs.encrypt.data.transfer</name>
<value>false</value>
</property>
<property>
<name>dfs.encrypt.data.transfer.algorithm</name>
<value>rc4</value>
</property>
<!-- DataNode listen endpoints, bound to this host's FQDN as reported by
     Ansible facts (ansible_fqdn). -->
<!-- Data streaming (block transfer) port. -->
<property>
<name>dfs.datanode.address</name>
<value>{{ ansible_fqdn }}:{{ hdfs_datanode_rpc_port }}</value>
</property>
<!-- IPC/metadata port. -->
<property>
<name>dfs.datanode.ipc.address</name>
<value>{{ ansible_fqdn }}:{{ hdfs_datanode_ipc_port }}</value>
</property>
<!-- Web UI / WebHDFS HTTP port. -->
<property>
<name>dfs.datanode.http.address</name>
<value>{{ ansible_fqdn }}:{{ hdfs_datanode_http_port }}</value>
</property>
<!-- HTTPS port. NOTE(review): 50475 is the only non-templated port in this
     file (it is the stock default); consider introducing an Ansible var
     for consistency with the three ports above. -->
<property>
<name>dfs.datanode.https.address</name>
<value>{{ ansible_fqdn }}:50475</value>
</property>
<!-- Local storage directory for block data (file:// URI). -->
<property>
<name>dfs.datanode.data.dir</name>
<value>file://{{ hdfs_data_dir }}/{{ hdfs_dn_data_dir }}</value>
</property>
<!-- Permissions the DataNode enforces on its data directories. -->
<property>
<name>dfs.datanode.data.dir.perm</name>
<value>700</value>
</property>
<!-- DataNode tuning. -->
<!-- Server threads handling IPC requests. -->
<property>
<name>dfs.datanode.handler.count</name>
<value>3</value>
</property>
<!-- Max threads for block transfer in/out of the DataNode. -->
<property>
<name>dfs.datanode.max.transfer.threads</name>
<value>4096</value>
</property>
<!-- Bytes per volume reserved for non-HDFS use (~10 GiB). -->
<property>
<name>dfs.datanode.du.reserved</name>
<value>10732175360</value>
</property>
<!-- DataNode shuts down on the first failed data volume. -->
<property>
<name>dfs.datanode.failed.volumes.tolerated</name>
<value>0</value>
</property>
<!-- Bandwidth cap (bytes/sec) used by the balancer. -->
<property>
<name>dfs.datanode.balance.bandwidthPerSec</name>
<value>{{ hdfs_dn_balance_bandwidthPerSec }}</value>
</property>
<!-- NOTE(review): dfs.datanode.max.xcievers is the deprecated alias of
     dfs.datanode.max.transfer.threads, which is ALSO set above to a
     hardcoded 4096. Setting both with potentially different values is
     ambiguous (the deprecation map resolves it, but the templated value
     here may be silently ignored) - pick one key and one source of truth. -->
<property>
<name>dfs.datanode.max.xcievers</name>
<value>{{ hdfs_datanode_max_xcievers }}</value>
</property>
<!-- NOTE(review): dfs.support.append was a 1.x-era switch; append is
     always available in Hadoop 2.x+ and this key is ignored there -
     confirm target Hadoop version before relying on it. -->
<property>
<name>dfs.support.append</name>
<value>{{ hdfs_support_append }}</value>
</property>
<!-- No DataNode plugins loaded (explicitly empty). -->
<property>
<name>dfs.datanode.plugins</name>
<value></value>
</property>
<!-- NOTE(review): dfs.datanode.hdfs-blocks-metadata.enabled and the
     dfs.thrift.* keys below look like legacy settings consumed by external
     tooling (e.g. Impala block-location API / Hue thrift plugins), not by
     stock HDFS - confirm they are still needed for this deployment. -->
<property>
<name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.thrift.threads.max</name>
<value>20</value>
</property>
<property>
<name>dfs.thrift.threads.min</name>
<value>10</value>
</property>
<property>
<name>dfs.thrift.timeout</name>
<value>60</value>
</property>
<!-- This DataNode registers/connects to peers by IP, not hostname. -->
<property>
<name>dfs.datanode.use.datanode.hostname</name>
<value>false</value>
</property>
</configuration>