ClickHouse Cluster Deployment (Part 4)

Posted by EMQX Enterprise Technical Support on 2022-03-07 14:20:02


  1. metrika.xml
    <?xml version="1.0"?>
    <yandex>
        <!-- ClickHouse cluster nodes -->
        <clickhouse_remote_servers>
            <emqx_cluster_all>
                <!-- shard 1 -->
                <shard>
                    <internal_replication>true</internal_replication>
                    <replica>
                        <host>192.168.0.27</host>
                        <port>9000</port>
                        <user>default</user>
                        <password>supersecrect</password>
                    </replica>
                    <!-- replica 1 -->
                    <replica>
                        <host>192.168.0.74</host>
                        <port>9002</port>
                        <user>default</user>
                        <password>supersecrect</password>
                    </replica>
                </shard>
                <!-- shard 2 -->
                <shard>
                    <internal_replication>true</internal_replication>
                    <replica>
                        <host>192.168.0.221</host>
                        <port>9000</port>
                        <user>default</user>
                        <password>supersecrect</password>
                    </replica>
                    <!-- replica 2 -->
                    <replica>
                        <host>192.168.0.27</host>
                        <port>9002</port>
                        <user>default</user>
                        <password>supersecrect</password>
                    </replica>
                </shard>
                <!-- shard 3 -->
                <shard>
                    <internal_replication>true</internal_replication>
                    <replica>
                        <host>192.168.0.74</host>
                        <port>9000</port>
                        <user>default</user>
                        <password>supersecrect</password>
                    </replica>
                    <!-- replica 3 -->
                    <replica>
                        <host>192.168.0.221</host>
                        <port>9002</port>
                        <user>default</user>
                        <password>supersecrect</password>
                    </replica>
                </shard>
            </emqx_cluster_all>
        </clickhouse_remote_servers>
        <!-- ZooKeeper configuration -->
        <zookeeper-servers>
            <node index="1">
                <host>192.168.0.27</host>
                <port>2181</port>
            </node>
            <node index="2">
                <host>192.168.0.221</host>
                <port>2181</port>
            </node>
            <node index="3">
                <host>192.168.0.74</host>
                <port>2181</port>
            </node>
        </zookeeper-servers>
        <macros>
            <layer>01</layer>
            <!-- shard number -->
            <shard>01</shard>
            <!-- replica name of the current instance -->
            <replica>node6</replica>
        </macros>
        <networks>
            <ip>::/0</ip>
        </networks>
        <!-- compression settings -->
        <clickhouse_compression>
            <case>
                <min_part_size>10000000000</min_part_size>
                <min_part_size_ratio>0.01</min_part_size_ratio>
                <!-- lz4 compresses faster than zstd but uses more disk space -->
                <method>lz4</method>
            </case>
        </clickhouse_compression>
    </yandex>
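
    The <macros> block is the only part that must differ per instance: replicas of the same shard share the layer/shard values, but each of the six instances (node1 ... node6) needs a unique replica name. The following is a quick sanity check before starting the services, assuming xmllint is available and that each instance keeps its metrika.xml next to its config.xml under /data/clickhouse/ch_<port>/ (a directory layout inferred from the config path used in the systemd unit below):

    # Validate the XML and print each instance's macros for review (sketch; paths are assumptions)
    for d in /data/clickhouse/ch_*; do
        xmllint --noout "$d/metrika.xml" && echo "$d/metrika.xml: well-formed"
        grep -A4 '<macros>' "$d/metrika.xml"
    done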


  2. users.xml

    users.xml is identical on every node; copy the users.xml from node1 to all the other instances.
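
    A minimal way to distribute node1's users.xml, assuming the same /data/clickhouse/ch_9000 and ch_9002 instance directories on every machine (hosts are the ones from the cluster definition above; adjust paths to your layout):

    # Copy node1's users.xml to the local second instance and to both instances on the other machines (sketch)
    cp /data/clickhouse/ch_9000/users.xml /data/clickhouse/ch_9002/users.xml
    for h in 192.168.0.221 192.168.0.74; do
        scp /data/clickhouse/ch_9000/users.xml root@$h:/data/clickhouse/ch_9000/users.xml
        scp /data/clickhouse/ch_9000/users.xml root@$h:/data/clickhouse/ch_9002/users.xml
    done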

    (8) Change the ownership

    Run on every ClickHouse machine:

    chown -R clickhouse:clickhouse /data/clickhouse
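
    If SSH access between the machines is already set up, the ownership fix can be applied and checked from a single host (a sketch, not required; host list taken from this guide):

    for h in 192.168.0.27 192.168.0.221 192.168.0.74; do
        ssh root@$h 'chown -R clickhouse:clickhouse /data/clickhouse && ls -ld /data/clickhouse'
    done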


    (9) Process supervision

    Manage the instances with systemd.

    First delete the unit file installed by the package on every machine: /etc/systemd/system/clickhouse-server.service

    Also disable it: systemctl disable clickhouse-server.service


    Using node1 as an example:

    vim /etc/systemd/system/clickhouse_node1.service

    [Unit]
    Description=ClickHouse Server (analytic DBMS for big data)
    Requires=network-online.target
    After=network-online.target

    [Service]
    #Type=simple
    Type=forking
    User=clickhouse
    Group=clickhouse
    Restart=always
    RestartSec=30
    RuntimeDirectory=clickhouse-server
    #ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml --pid-file=/run/clickhouse-server/clickhouse-server.pid
    ExecStart=/usr/bin/clickhouse-server --daemon --config=/data/clickhouse/ch_9000/config.xml --pid-file=/data/clickhouse/node1/clickhouse-server.pid
    #PIDFile=/data/clickhouse/node1/clickhouse-server.pid
    LimitCORE=infinity
    LimitNOFILE=500000
    CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE

    [Install]
    WantedBy=multi-user.target
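
    The second instance on the same machine (node4 on port 9002 in this layout) needs its own unit file. One way to derive it from the node1 unit, assuming the ch_9002/node4 naming follows the same pattern:

    # Derive the unit for the second instance and make systemd pick it up (sketch)
    sed -e 's/ch_9000/ch_9002/g' -e 's/node1/node4/g' \
        /etc/systemd/system/clickhouse_node1.service \
        > /etc/systemd/system/clickhouse_node4.service
    systemctl daemon-reload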


    (10) Start the services

    1. esc-clickhouse-001:

    systemctl start clickhouse_node1.service

    systemctl start clickhouse_node4.service

    2. esc-clickhouse-002:

    systemctl start clickhouse_node2.service

    systemctl start clickhouse_node5.service

    3. esc-clickhouse-003:

    systemctl start clickhouse_node3.service

    systemctl start clickhouse_node6.service
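
    After starting, it is worth confirming on each machine that both instances are up and listening on their native-protocol ports, for example:

    # Check unit state and the listeners on 9000 and 9002 (sketch)
    systemctl list-units 'clickhouse_node*' --no-pager
    ss -lntp | grep -E ':(9000|9002)\s'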

    (11) Enable start on boot

    Check which ClickHouse units are currently enabled at boot:

    systemctl list-unit-files | grep enable | grep click


    Disable the default unit, which is no longer needed:

    systemctl disable clickhouse-server.service


    Enable the custom units:

    systemctl enable clickhouse_node2.service
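
    Each machine enables its own two units (names follow the node scheme above); systemctl is-enabled confirms the result. For example, on esc-clickhouse-002:

    systemctl enable clickhouse_node2.service clickhouse_node5.service
    systemctl is-enabled clickhouse_node2.service clickhouse_node5.service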


4. Verification

  1. Connect

    clickhouse-client -u default --password supersecrect --port 9000 -h 192.168.0.27


  2. Query the cluster node information

    select * from system.clusters;
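
    To narrow the output to the cluster defined in metrika.xml, the same check can be run non-interactively (a sketch using the host and credentials from this guide):

    clickhouse-client -h 192.168.0.27 --port 9000 -u default --password supersecrect --query "
        SELECT cluster, shard_num, replica_num, host_address, port
        FROM system.clusters
        WHERE cluster = 'emqx_cluster_all'"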


5. Create Tables

  1. Create a database

    create database testdb on cluster emqx_cluster_all;


  2. Create the local table (ReplicatedMergeTree)

    CREATE TABLE testdb.test_local on cluster emqx_cluster_all
    (    
        msgid String,    
        topic String,    
        clientid String,    
        payload Nullable(String),    
        eventDate DateTime
    ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/testdb/test_local', '{replica}')
    PARTITION BY toYYYYMM(eventDate)
    ORDER BY (msgid, eventDate, clientid);
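
    Each instance should now register a replica for this table; a quick check through system.replicas (a sketch, using the same credentials as above):

    clickhouse-client -h 192.168.0.27 --port 9000 -u default --password supersecrect --query "
        SELECT database, table, replica_name, zookeeper_path
        FROM system.replicas
        WHERE table = 'test_local'"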


  3. Create the distributed table

    CREATE TABLE testdb.test ON CLUSTER emqx_cluster_all AS testdb.test_local engine = Distributed(emqx_cluster_all, testdb, test_local, rand());
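
    Before wiring up EMQX, a quick end-to-end check: insert through the distributed table on one node and read the count back from another (a sketch; hosts and credentials as above). Because distributed inserts are forwarded asynchronously by default, the count may take a moment to appear.

    clickhouse-client -h 192.168.0.27 --port 9000 -u default --password supersecrect --query "
        INSERT INTO testdb.test (msgid, topic, clientid, payload, eventDate)
        VALUES ('m-0001', 'test/1', 'client-1', 'hello clickhouse', now())"
    clickhouse-client -h 192.168.0.221 --port 9000 -u default --password supersecrect --query "
        SELECT count() FROM testdb.test"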


  4. List the tables

    use testdb;
    show tables;


  5. Inspect the ZooKeeper tree

    select * from system.zookeeper WHERE path='/';
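
    With the macros above, {layer}-{shard} resolves to 01-01 on the first shard, so that shard's replica registrations can be listed directly (a sketch):

    clickhouse-client -h 192.168.0.27 --port 9000 -u default --password supersecrect --query "
        SELECT name
        FROM system.zookeeper
        WHERE path = '/clickhouse/tables/01-01/testdb/test_local/replicas'"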


6. EMQX Integration

  1. Create a resource

    Create a ClickHouse resource for the rule engine in the EMQX Dashboard, pointing it at one of the cluster nodes.


  2. Create a rule

    SQL: SELECT * FROM "test/#"


    Action:

    SQL: insert into testdb.test(eventDate, msgid, clientid, topic, payload) values (now(), '${id}', '${clientid}', '${topic}', '${payload}')


  3. Publish test data

    Use the client tool built into the EMQX Dashboard to publish a test message.
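
    Alternatively, a message can be published from the command line, for example with mosquitto_pub (assumes the mosquitto clients are installed and EMQX is listening on the default MQTT port 1883; replace <emqx-host> with your broker address):

    mosquitto_pub -h <emqx-host> -p 1883 -t 'test/1' -m '{"msg": "hello from mqtt"}'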


  4. Verify the data

    (1) Rule engine metrics


    (2) ClickHouse cluster verification

    Query through the distributed table: select * from testdb.test;


    Check that the data is actually stored on more than one node, as in the sketch below:
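
    Querying the local table on every instance shows how the rows are distributed: each row written through testdb.test should appear on both replicas of whichever shard received it (hosts, ports, and credentials as in this guide):

    for h in 192.168.0.27 192.168.0.221 192.168.0.74; do
        for p in 9000 9002; do
            echo -n "$h:$p -> "
            clickhouse-client -h $h --port $p -u default --password supersecrect \
                --query "SELECT count() FROM testdb.test_local"
        done
    done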



