Why does monitoring multiple tables fail?
ajinoline opened this issue
This is my boot configuration:
mypipe {

  include-event-condition = """ db == "medusa" && (table.startsWith("forum_post_p") || table == "forum_user") """

  # Avro schema repository client class name
  schema-repo-client = "mypipe.avro.schema.SchemaRepo"

  # consumers represent sources for mysql binary logs
  consumers {

    database1 {
      # database "host:port:user:pass" array
      source = ["192.168.32.102:3309:mypipe:mypipepass"]
    }

    database2 {
      # database "host:port:user:pass" array
      source = ["192.168.32.103:3309:mypipe:mypipepass"]
    }
  }

  # data producers export data out (stdout, other stores, external services, etc.)
  producers {

    stdout {
      class = "mypipe.producer.stdout.StdoutProducer"
    }

    kafka-generic {
      class = "mypipe.producer.KafkaMutationGenericAvroProducer"
    }

    kafka-specific {
      class = "mypipe.producer.KafkaMutationSpecificAvroProducer"
    }
  }

  # pipes join consumers and producers
  pipes {

    kafka-specific1 {
      enabled = true
      consumers = ["database1"]

      producer {
        kafka-specific {
          schema-repo-client = "mypipe.avro.ForumPostSchemaRepo"
          metadata-brokers = "192.168.32.29:9092"
        }
      }

      binlog-position-repo {
        class = "mypipe.api.repo.ConfigurableZookeeperBasedBinaryLogPositionRepository"
        config {
          path-prefix = "/mypipe-specific-post"
          conn = "192.168.32.29:2181,192.168.32.29:2182,192.168.32.29:2183"
          max-retry-seconds = 10
        }
      }
    }

    kafka-specific2 {
      enabled = true
      consumers = ["database2"]

      producer {
        kafka-specific {
          schema-repo-client = "mypipe.avro.ForumUserSchemaRepo"
          metadata-brokers = "192.168.32.29:9092"
        }
      }

      binlog-position-repo {
        class = "mypipe.api.repo.ConfigurableZookeeperBasedBinaryLogPositionRepository"
        config {
          path-prefix = "/mypipe-specific-user"
          conn = "192.168.32.29:2181,192.168.32.29:2182,192.168.32.29:2183"
          max-retry-seconds = 10
        }
      }
    }
  }

  # zookeeper used for Leader Election
  zk {
    leader-path = "/mypipe-leader"
    conn = "192.168.32.29:2181,192.168.32.29:2182,192.168.32.29:2183"
    max-retry-seconds = 10
  }
}
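To show what I expect the include-event-condition above to match, here is a minimal Scala sketch of the same predicate. This is my own approximation for illustration only, not mypipe's actual condition evaluator:

// Minimal sketch: the include-event-condition rewritten as a plain
// Scala predicate, to show which (db, table) pairs I expect to pass.
object FilterCheck extends App {
  def included(db: String, table: String): Boolean =
    db == "medusa" && (table.startsWith("forum_post_p") || table == "forum_user")

  println(included("medusa", "forum_post_p01")) // true  (partitioned post table)
  println(included("medusa", "forum_user"))     // true  (exact table match)
  println(included("medusa", "forum_topic"))    // false (not covered by the condition)
  println(included("otherdb", "forum_user"))    // false (wrong database)
}

So both pipes (kafka-specific1 reading database1, kafka-specific2 reading database2) should only see mutations from forum_post_p* and forum_user in the medusa database.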