
Setting Up a Single-Node Kafka Environment

This article walks through setting up a single-node Kafka environment. Many people run into questions here in day-to-day work, so the steps below are organized into a simple, workable procedure; hopefully it clears things up. Follow along and try it yourself!


1. Start ZooKeeper

Kafka depends on ZooKeeper, so start it first. Running zkServer.sh without arguments prints its usage:

[root@node1 bin]# ./zkServer.sh 

JMX enabled by default

Using config: /opt/bigdata/zookeeper/bin/../conf/zoo.cfg

Usage: ./zkServer.sh {start|start-foreground|stop|restart|status|upgrade|print-cmd}

Start the server in the foreground so its log stays visible (use start instead to run it in the background):

[root@node1 bin]# ./zkServer.sh  start-foreground

JMX enabled by default

Using config: /opt/bigdata/zookeeper/bin/../conf/zoo.cfg
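Once the server is up, verify it from another shell. Two quick checks, assuming ZooKeeper is listening on its default client port 2181 (the second sends the standard ruok four-letter command, which a healthy server answers with imok):

[root@node1 bin]# ./zkServer.sh status
[root@node1 bin]# echo ruok | nc localhost 2181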

2. Start the Kafka broker

With ZooKeeper running, start the Kafka broker in a second terminal. On startup it logs the effective broker configuration:

[root@node1 bin]# ./kafka-server-start.sh ../config/server.properties

[2016-04-02 04:10:29,995] INFO KafkaConfig values: 
        request.timeout.ms = 30000
        log.roll.hours = 168
        inter.broker.protocol.version = 0.9.0.X
        log.preallocate = false
        security.inter.broker.protocol = PLAINTEXT
        controller.socket.timeout.ms = 30000
        broker.id.generation.enable = true
        ssl.keymanager.algorithm = SunX509
        ssl.key.password = null
        log.cleaner.enable = true
        ssl.provider = null
        num.recovery.threads.per.data.dir = 1
        background.threads = 10
        unclean.leader.election.enable = true
        sasl.kerberos.kinit.cmd = /usr/bin/kinit
        replica.lag.time.max.ms = 10000
        ssl.endpoint.identification.algorithm = null
        auto.create.topics.enable = true
        zookeeper.sync.time.ms = 2000
        ssl.client.auth = none
        ssl.keystore.password = null
        log.cleaner.io.buffer.load.factor = 0.9
        offsets.topic.compression.codec = 0
        log.retention.hours = 168
        log.dirs = /tmp/kafka-logs
        ssl.protocol = TLS
        log.index.size.max.bytes = 10485760
        sasl.kerberos.min.time.before.relogin = 60000
        log.retention.minutes = null
        connections.max.idle.ms = 600000
        ssl.trustmanager.algorithm = PKIX
        offsets.retention.minutes = 1440
        max.connections.per.ip = 2147483647
        replica.fetch.wait.max.ms = 500
        metrics.num.samples = 2
        port = 9092
        offsets.retention.check.interval.ms = 600000
        log.cleaner.dedupe.buffer.size = 134217728
        log.segment.bytes = 1073741824
        group.min.session.timeout.ms = 6000
        producer.purgatory.purge.interval.requests = 1000
        min.insync.replicas = 1
        ssl.truststore.password = null
        log.flush.scheduler.interval.ms = 9223372036854775807
        socket.receive.buffer.bytes = 102400
        leader.imbalance.per.broker.percentage = 10
        num.io.threads = 8
        zookeeper.connect = localhost:2181
        queued.max.requests = 500
        offsets.topic.replication.factor = 3
        replica.socket.timeout.ms = 30000
        offsets.topic.segment.bytes = 104857600
        replica.high.watermark.checkpoint.interval.ms = 5000
        broker.id = 0
        ssl.keystore.location = null
        listeners = PLAINTEXT://:9092
        log.flush.interval.messages = 9223372036854775807
        principal.builder.class = class org.apache.kafka.common.security.auth.DefaultPrincipalBuilder
        log.retention.ms = null
        offsets.commit.required.acks = -1
        sasl.kerberos.principal.to.local.rules = [DEFAULT]
        group.max.session.timeout.ms = 30000
        num.replica.fetchers = 1
        advertised.listeners = null
        replica.socket.receive.buffer.bytes = 65536
        delete.topic.enable = false
        log.index.interval.bytes = 4096
        metric.reporters = []
        compression.type = producer
        log.cleanup.policy = delete
        controlled.shutdown.max.retries = 3
        log.cleaner.threads = 1
        quota.window.size.seconds = 1
        zookeeper.connection.timeout.ms = 6000
        offsets.load.buffer.size = 5242880
        zookeeper.session.timeout.ms = 6000
        ssl.cipher.suites = null
        authorizer.class.name = 
        sasl.kerberos.ticket.renew.jitter = 0.05
        sasl.kerberos.service.name = null
        controlled.shutdown.enable = true
        offsets.topic.num.partitions = 50
        quota.window.num = 11
        message.max.bytes = 1000012
        log.cleaner.backoff.ms = 15000
        log.roll.jitter.hours = 0
        log.retention.check.interval.ms = 300000
        replica.fetch.max.bytes = 1048576
        log.cleaner.delete.retention.ms = 86400000
        fetch.purgatory.purge.interval.requests = 1000
        log.cleaner.min.cleanable.ratio = 0.5
        offsets.commit.timeout.ms = 5000
        zookeeper.set.acl = false
        log.retention.bytes = -1
        offset.metadata.max.bytes = 4096
        leader.imbalance.check.interval.seconds = 300
        quota.consumer.default = 9223372036854775807
        log.roll.jitter.ms = null
        reserved.broker.max.id = 1000
        replica.fetch.backoff.ms = 1000
        advertised.host.name = null
        quota.producer.default = 9223372036854775807
        log.cleaner.io.buffer.size = 524288
        controlled.shutdown.retry.backoff.ms = 5000
        log.dir = /tmp/kafka-logs
        log.flush.offset.checkpoint.interval.ms = 60000
        log.segment.delete.delay.ms = 60000
        num.partitions = 1
        num.network.threads = 3
        socket.request.max.bytes = 104857600
        sasl.kerberos.ticket.renew.window.factor = 0.8
        log.roll.ms = null
        ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
        socket.send.buffer.bytes = 102400
        log.flush.interval.ms = null
        ssl.truststore.location = null
        log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
        default.replication.factor = 1
        metrics.sample.window.ms = 30000
        auto.leader.rebalance.enable = true
        host.name = 
        ssl.truststore.type = JKS
        advertised.port = null
        max.connections.per.ip.overrides = 
        replica.fetch.min.bytes = 1
        ssl.keystore.type = JKS
 (kafka.server.KafkaConfig)
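The values worth noticing in this dump are broker.id, listeners, log.dirs, and zookeeper.connect; everything else is at its default. A minimal sketch of the matching entries in ../config/server.properties, assuming the defaults shown above (the comments are illustrative):

# Minimal single-node broker configuration (values match the startup dump above).
# Unique id for this broker; a single-node setup needs only one.
broker.id=0
# Accept plaintext client connections on the default port.
listeners=PLAINTEXT://:9092
# Where message data is stored; move this off /tmp if the data should survive reboots.
log.dirs=/tmp/kafka-logs
# The ZooKeeper instance started in step 1.
zookeeper.connect=localhost:2181

Once the broker starts cleanly in the foreground, it can also be launched detached with ./kafka-server-start.sh -daemon ../config/server.properties, where -daemon backgrounds the process.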

3. Create a topic

Create a test topic with one partition and a replication factor of 1 (the only factor possible with a single broker):

[root@node1 bin]# ./kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test

Created topic "test".
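To confirm the partition assignment and leader for the new topic, describe it:

[root@node1 bin]# ./kafka-topics.sh --describe --zookeeper localhost:2181 --topic test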

4. List topics

The new topic should appear in the list:

[root@node1 bin]# ./kafka-topics.sh --list --zookeeper localhost:2181

test

5. Produce messages

The console producer reads lines from standard input and sends each line to the topic as a separate message:

[root@node1 bin]# ./kafka-console-producer.sh --broker-list localhost:9092 --topic test

fhgfhfgh\

gjgjhgjg

gjhgjkghk

nvnbv
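Since the producer reads standard input, an existing file can be piped in as well; a sketch, where messages.txt is a hypothetical file containing one message per line:

[root@node1 bin]# ./kafka-console-producer.sh --broker-list localhost:9092 --topic test < messages.txt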

6. Consume messages

In another terminal, start a console consumer. The --from-beginning flag replays the topic from its first message, so the lines typed above come back:

[root@node1 ~]# cd /opt/bigdata/kafka/bin/

[root@node1 bin]# ./kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning

fhgfhfgh\

gjgjhgjg

gjhgjkghk

nvnbv
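Omit --from-beginning to receive only messages produced after the consumer starts, which is convenient when running the producer and consumer side by side:

[root@node1 bin]# ./kafka-console-consumer.sh --zookeeper localhost:2181 --topic test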

This concludes the walkthrough of setting up a single-node Kafka environment; hopefully it has cleared up any lingering questions. Theory works best when paired with practice, so go try it yourself!

