Error when executing the "Make" step: the build toolchain is missing.
sudo apt-get install build-essential
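A quick way to confirm the toolchain is in place after the install (a minimal sketch):
gcc --version && make --version   # both should print a version once build-essential is installed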
Running a MongoDB script
1. Write the script job.js with the following content:
conn = new Mongo("mongodb://abc:b=abc@10.0.1.183:27017,10.0.1.184:27017/wps_credit?maxPoolSize=300&replicaSet=c45134ec-6a5f-461e-8745-7081b46b0d87");
db = conn.getDB("abc");
var result = db.transaction_5.find().limit(10);
while(result.hasNext()) {
printjson(result.next());
}
var t1 = new Date().getTime();
printjson(db.transaction_6.count());
db.transaction_6.remove({});
printjson(db.transaction_7.count());
db.transaction_7.remove({});
printjson(db.transaction_8.count());
db.transaction_8.remove({});
var t2 = new Date().getTime() - t1;
printjson(t2);
2. Run it:
mongo --nodb job.js
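To keep the job's output around (e.g. when running it from cron), a minimal sketch, assuming the script is saved at /data/scripts/job.js (hypothetical path):
mongo --nodb /data/scripts/job.js >> /data/logs/job.log 2>&1   # append stdout and stderr to a log file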
MongoDB primary/secondary resync for space reclamation
In short, each secondary is rebuilt as a fresh mongodb instance, and a primary/secondary switchover completes the space cleanup.
————————-
1) First delete the data; remove does not block the whole db.
Do this while the business load is low.
db.collection1.remove({})
db.collection2.remove({})
…
2) Then resync the data on the secondary from scratch, which releases the space held by the deleted documents on the secondary.
After the sync finishes, perform a switchover: the secondary is promoted to primary.
3) Resync the new secondary in the same way, which releases its space as well.
4) Drop the empty collections that were cleaned up.
db.collection1.drop()
db.collection2.drop()
…
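The switchover in step 2 can be triggered from the shell; a minimal sketch, assuming the current primary is the 10.0.1.183 host from the connection string above:
# ask the primary to step down for 60 seconds so a fully synced secondary takes over
mongo --host 10.0.1.183 --port 27017 --eval 'rs.stepDown(60)'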
Extracting the VC2017 runtime libraries
If you build C or C++ programs with Visual Studio 2015 or 2017, they are already based on the UCRT.
To make extracting the runtime libraries easier, install the Everything search tool.
Download: https://www.voidtools.com/zh-cn/downloads/
Take the VC2017 runtime as an example.
The VC2017 runtime libraries (UCRT libraries) include the following kinds of files.
Microsoft.VC141.CRT:
1. In Everything, search for: Microsoft.VC141.CRT
2. Then search for ucrt; the genuine UWP/UCRT libraries are the ones under a Redist subdirectory, normally 41 files.
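A sketch of copying the two sets of DLLs next to an application from a Git Bash / MSYS shell; the exact MSVC version directory (14.16.27012 below) and the VS edition path are assumptions that vary per installation, so use whatever paths Everything actually reports:
cp "/c/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Redist/MSVC/14.16.27012/x64/Microsoft.VC141.CRT/"*.dll ./myapp/
cp "/c/Program Files (x86)/Windows Kits/10/Redist/ucrt/DLLs/x64/"*.dll ./myapp/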
Containerized Kafka deployment
1. See the implementation at https://github.com/wurstmeister/kafka-docker.
2. See https://github.com/simplesteph/kafka-stack-docker-compose.
3. Based on the two references above, the deployment file below was put together (start-up commands follow the compose file).
version: '3.1'
services:
zoo1:
image: zookeeper:3.4.9
hostname: zoo1
ports:
- "2181:2181"
environment:
ZOO_MY_ID: 1
ZOO_PORT: 2181
ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
volumes:
- ./zk-multiple-kafka-multiple/zoo1/data:/data
- ./zk-multiple-kafka-multiple/zoo1/datalog:/datalog
zoo2:
image: zookeeper:3.4.9
hostname: zoo2
ports:
- "2182:2182"
environment:
ZOO_MY_ID: 2
ZOO_PORT: 2182
ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
volumes:
- ./zk-multiple-kafka-multiple/zoo2/data:/data
- ./zk-multiple-kafka-multiple/zoo2/datalog:/datalog
zoo3:
image: zookeeper:3.4.9
hostname: zoo3
ports:
- "2183:2183"
environment:
ZOO_MY_ID: 3
ZOO_PORT: 2183
ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
volumes:
- ./zk-multiple-kafka-multiple/zoo3/data:/data
- ./zk-multiple-kafka-multiple/zoo3/datalog:/datalog
kafka1:
image: wurstmeister/kafka:2.12-2.0.1
container_name: kafka1
hostname: kafka1
ports:
- "9092:9092"
- "1099:1099"
environment:
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2182,zoo3:2183"
KAFKA_BROKER_ID: 1
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
KAFKA_LISTENERS: PLAINTEXT://:9092
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.10.100:9092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_DEFAULT_REPLICATION_FACTOR: 3
KAFKA_JMX_OPTS: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.rmi.port=1099"
JMX_PORT: 1099
volumes:
- ./zk-multiple-kafka-multiple/kafka1:/kafka
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- zoo1
- zoo2
- zoo3
kafka2:
image: wurstmeister/kafka:2.12-2.0.1
container_name: kafka2
hostname: kafka2
ports:
- "9093:9092"
- "2099:1099"
environment:
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2182,zoo3:2183"
KAFKA_BROKER_ID: 2
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
KAFKA_LISTENERS: PLAINTEXT://:9092
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.10.100:9093
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_DEFAULT_REPLICATION_FACTOR: 3
KAFKA_JMX_OPTS: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.rmi.port=1099"
JMX_PORT: 1099
volumes:
- ./zk-multiple-kafka-multiple/kafka2:/kafka
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- zoo1
- zoo2
- zoo3
kafka3:
image: wurstmeister/kafka:2.12-2.0.1
container_name: kafka3
hostname: kafka3
ports:
- "9094:9092"
- "3099:1099"
environment:
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2182,zoo3:2183"
KAFKA_BROKER_ID: 3
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
KAFKA_LISTENERS: PLAINTEXT://:9092
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.10.100:9094
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
KAFKA_DEFAULT_REPLICATION_FACTOR: 3
KAFKA_JMX_OPTS: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.rmi.port=1099"
JMX_PORT: 1099
volumes:
- ./zk-multiple-kafka-multiple/kafka3:/kafka
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- zoo1
- zoo2
- zoo3
manager:
image: hlebalbau/kafka-manager:2.0.0.2
hostname: manager
ports:
- "9000:9000"
environment:
ZK_HOSTS: "zoo1:2181,zoo2:2182,zoo3:2183"
APPLICATION_SECRET: "random-secret"
KAFKA_MANAGER_AUTH_ENABLED: "true"
KAFKA_MANAGER_USERNAME: "abc"
KAFKA_MANAGER_PASSWORD: "123"
command: -Dpidfile.path=/dev/null
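To bring the stack up and create a test topic, a minimal sketch, assuming the file above is saved as docker-compose.yml in the current directory:
docker-compose up -d     # starts zoo1-3, kafka1-3 and kafka-manager (http://localhost:9000)
docker-compose ps        # all containers should be Up
# create the topic used by the test code below, with 3 partitions and replication factor 3
docker exec -it kafka1 kafka-topics.sh --create --zookeeper zoo1:2181 --topic topic-A --partitions 3 --replication-factor 3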
4. Test code
Based on the examples from the https://github.com/segmentio/kafka-go library (a console-script check follows the Go code):
package kaf
import (
"context"
"fmt"
"github.com/segmentio/kafka-go"
"log"
"time"
)
func LeaderProduce() {
topic := "my-topic"
partition := 0
conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition)
if err != nil {
log.Fatal(err)
}
conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
conn.WriteMessages(
kafka.Message{Value: []byte(fmt.Sprint("one!", time.Now()))},
kafka.Message{Value: []byte(fmt.Sprint("two!", time.Now()))},
kafka.Message{Value: []byte(fmt.Sprint("three!", time.Now()))},
)
conn.Close()
}
func LeaderConsumer() {
topic := "my-topic"
partition := 0
conn, _ := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition)
conn.SetReadDeadline(time.Now().Add(10 * time.Second))
batch := conn.ReadBatch(10e3, 1e6) // fetch 10KB min, 1MB max
for {
msg, err := batch.ReadMessage()
if err != nil {
break
}
fmt.Println(string(msg.Value))
}
batch.Close()
conn.Close()
}
func ClusterProduce(port int) {
// make a writer that produces to topic-A, using the least-bytes distribution
w := kafka.NewWriter(kafka.WriterConfig{
Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"},
Topic: "topic-A",
Balancer: &kafka.LeastBytes{},
})
err := w.WriteMessages(context.Background(),
kafka.Message{
Key: []byte("Key-A"),
Value: []byte(fmt.Sprint("Hello World!", time.Now())),
},
kafka.Message{
Key: []byte("Key-B"),
Value: []byte(fmt.Sprint("One!", time.Now())),
},
)
if err != nil {
fmt.Println(port, "error", err)
}
w.Close()
}
func clusterConsume(port int) {
// make a new reader that consumes from topic-A
r := kafka.NewReader(kafka.ReaderConfig{
Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"},
GroupID: "consumer-group-id",
Topic: "topic-A",
MinBytes: 1024 * 10, // 10KB
MaxBytes: 10e6, // 10MB
})
for {
m, err := r.ReadMessage(context.Background())
if err != nil {
fmt.Println(port, "error.....", err)
time.Sleep(time.Second * 10)
continue
}
fmt.Printf("%v--message at topic/partition/offset %v/%v/%v: %s = %s\n", port, m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))
// time.Sleep(time.Second)
}
r.Close()
}
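The same cluster can also be exercised without the Go client, using the console scripts shipped inside the broker containers; a sketch, with the broker addresses taken from the advertised listeners above:
docker exec -it kafka1 bash -c 'echo "hello $(date)" | kafka-console-producer.sh --broker-list 192.168.10.100:9092,192.168.10.100:9093,192.168.10.100:9094 --topic topic-A'
docker exec -it kafka1 kafka-console-consumer.sh --bootstrap-server 192.168.10.100:9092 --topic topic-A --from-beginning --max-messages 1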
Installing a PHP 7 development environment with YUM
Step 1: Install the remi repository
yum install epel-release -y
or: rpm -Uvh http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
rpm -Uvh http://rpms.remirepo.net/enterprise/remi-release-7.rpm
or: yum install http://rpms.famillecollet.com/enterprise/remi-release-7.rpm
Step 2: Configure the PHP 7.2 repository
yum -y install yum-utils
yum-config-manager --enable remi-php72
Step 3: Install PHP
yum install php   # because remi-php72 was enabled in the previous step, this resolves to PHP 7.2
Step 4: Install extension packages
yum install php php72-php-opcache php72-php-ldap php72-php-odbc php72-php-pear php72-php-xml php72-php-xmlrpc php72-php-soap curl curl-devel php72-php-mbstring php72-php-mysqlnd php72-php-fpm php72-php-gd php72-php-xdebug php72-php-pecl-mysql php72-php-pecl-memcached php72-php-pecl-memcache php72-php-pecl-redis
Step 5: Install php-fpm
yum install php72-php-fpm
systemctl restart php72-php-fpm   # start the php-fpm service
netstat -tunlp | grep 9000   # check that port 9000 is listening
Locations of the PHP configuration files and installed components:
/etc/opt/remi/php72
/etc/opt/remi/php72/php-fpm.d/*.conf
—————————
Install xdebug
Search for the package: yum search php | grep xdebug
yum install php72-php-pecl-xdebug
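A quick check that the extension was picked up (a sketch; the php72-php-* packages are Remi's SCL builds, rooted under /opt/remi/php72 as the config paths above suggest):
/opt/remi/php72/root/usr/bin/php --modules | grep -i xdebug
php --modules | grep -i xdebug   # for the base remi-php72 php, if php-pecl-xdebug was installed instead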
CentOS 7 provides PHP version 5.4 in its official repository
Command to install the EPEL repository configuration package:
yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
Command to install the Remi repository configuration package:
yum install https://rpms.remirepo.net/enterprise/remi-release-7.rpm
Command to install the yum-utils package (for the yum-config-manager command):
yum install yum-utils
You want a single version which means replacing base packages from the distribution
Packages have the same name as in the base repository, i.e. php-*
Some common dependencies are available in remi-safe repository, which is enabled by default
PHP version 7.2 packages are available for CentOS 7 in remi-php72 repository
Command to enable the repository:
yum-config-manager --enable remi-php72
If the priorities plugin is enabled, ensure remi-php72 have higher priority (a lower value) than base and updates
Command to upgrade (the repository only provides PHP):
yum update
Command to install additional packages:
yum install php-xxx
Command to install testing packages:
yum --enablerepo=remi-php72-test install php-xxx
Command to check the installed version and available extensions:
php --version
php --modules
Containerized ZooKeeper configuration
docker pull zookeeper
https://github.com/getwingm/kafka-stack-docker-compose
version: '3.1'
services:
zoo1:
image: zookeeper
restart: always
hostname: zoo1
ports:
- 2181:2181
environment:
ZOO_MY_ID: 1
ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
zoo2:
image: zookeeper
restart: always
hostname: zoo2
ports:
- 2182:2181
environment:
ZOO_MY_ID: 2
ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zoo3:2888:3888;2181
zoo3:
image: zookeeper
restart: always
hostname: zoo3
ports:
- 2183:2181
environment:
ZOO_MY_ID: 3
ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
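To start the ensemble and confirm leader election, a minimal sketch, assuming the file above is saved as docker-compose.yml:
docker-compose up -d
# each node should answer with "Mode: leader" or "Mode: follower"
echo srvr | nc localhost 2181 | grep Mode
echo srvr | nc localhost 2182 | grep Mode
echo srvr | nc localhost 2183 | grep Mode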
Common Mongo operations
Create a unique index in the background
db.orders.ensureIndex({userid:1,reqid:1},{unique:true,background:true})
Back up a database
mongodump -h 10.0.1.106 --port 27017 -u root -p xxxxxxx -d abc_gold -o /data/backup/abc_gold --authenticationDatabase admin
Restore the data
mongorestore -h 10.0.1.107 --port 27017 -u root -p yyyyyyyyy -d abc_gold /data/backup/abc_gold --drop --authenticationDatabase admin
CentOS historical release downloads
http://mirror.nsc.liu.se/centos-store/
Compiling and running pika
1. Based on v3.1.1 of https://github.com/Qihoo360/pika/, a customized build with feature improvements for the business was produced.
https://github.com/kxtry/pika is based on v3.2.7 and adds two commands: incrbyrange(key, val, min, max) and hincrbyrange(key, val, min, max).
2. The official build is done directly inside Docker.
Build from the Dockerfile.
Change the CentOS base image in the Dockerfile to FROM centos:7.5.1804
docker build -t pika .
3. Extract the build output; the application can also be run directly from the container.
Run the application:
docker run pika:latest bash -c "./bin/pika -c ./conf/pika.conf"
The binaries can also be copied to the host and run there.
docker cp <container>:/pika/output ./   # e.g. docker cp ce4541cc4627:/pika/output ./
4. To run the extracted binaries on the host, install the required dependencies.
Install the EPEL repository.
rpm -ivh https://mirrors.ustc.edu.cn/epel/epel-release-latest-7.noarch.rpm
Install the glog and protobuf shared libraries.
sudo yum install -y glog protobuf   # at build time the corresponding packages are glog-devel and protobuf-devel
If it still fails to start, run strace ./pika or ldd pika to find out which shared libraries are missing.
5. Run:
docker run pika:latest bash -c "./bin/pika -c ./conf/pika.conf"
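Pika speaks the Redis protocol, so a quick connectivity check can be done with redis-cli; a minimal sketch, assuming the default port 9221 and requirepass abc123 from the template in step 7, and that redis-cli is installed on the host:
redis-cli -p 9221 -a abc123 ping          # expect PONG
redis-cli -p 9221 -a abc123 info server   # basic server information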
6. The launch script run-app.sh (usage is shown after the script)
#!/bin/sh
# crontab -e
# */1 * * * * sh /data/scripts/run-app.sh start
path_current=`pwd`
path_script=$(cd "$(dirname "$0")"; pwd)
path_data=$path_script/data
logfile=$path_data/check.log
mode=$1
name=pika
app_process=`ps -ef | grep "$name"| grep -v grep`
if [ ! -d $path_data ];then
mkdir -p $path_data
fi
echo `date` >> $logfile
echo "ready to check...." >> $logfile
case "$mode" in
'install')
if [ ! -f $path_script/.envok ]; then
rpm -ivh https://mirrors.ustc.edu.cn/epel/epel-release-latest-7.noarch.rpm
yum install -y glog protobuf && touch $path_script/.envok
fi
if [ ! -f $path_script/conf/pika.conf ]; then
mkdir -p $path_data && /bin/cp -rf $path_script/pika.conf.template $path_script/conf/pika.conf && echo "$path_script/conf/pika.conf" | xargs /bin/sed -i "s#{{path_current}}#$path_data#g"
fi
;;
'start')
echo "$app_process" >> $logfile
echo "it's ready to start op...."
if test -n "$app_process"; then
echo ""
echo "$app_process"
echo ""
else
cd $path_script
nohup $path_script/bin/$name -c $path_script/conf/${name}.conf > $path_data/info.txt 2>&1 &
echo "success to restart $name" >> $logfile
cd $path_current
fi
echo 'success to start.'
;;
'stop')
echo "it's ready to check process..."
if test -n "$app_process"; then
echo "had find app process informaton"
echo $app_process | awk '{print ($2)}' | xargs kill -3
fi
echo 'success to kill.'
;;
*)
basename=`basename "$0"`
echo "Usage: $basename {install|start|stop} [ server options ]"
exit 1
;;
esac
exit 0
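Typical usage of the script (a sketch; the path follows the crontab comment at the top of the script):
sh /data/scripts/run-app.sh install   # one-time: install runtime deps and generate conf/pika.conf from the template
sh /data/scripts/run-app.sh start     # start pika if it is not already running (safe to call from cron)
sh /data/scripts/run-app.sh stop      # stop a running instance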
7. The original default configuration template pika.conf.template
# Pika port
port : 9221
# Thread Number
thread-num : 50
# Thread Pool Size
thread-pool-size : 100
# Sync Thread Number
sync-thread-num : 10
# Pika log path
log-path : {{path_current}}/log/
# Pika db path
db-path : {{path_current}}/db/
# Pika write-buffer-size
write-buffer-size : 268435456
# Pika timeout
timeout : 60
# Requirepass
requirepass : abc123
# Masterauth
masterauth : abc123
# Userpass
userpass : abc123
# User Blacklist
userblacklist :
# if this option is set to 'classic', pika supports multiple DBs and the
# 'databases' option below takes effect
# if this option is set to 'sharding', pika supports multiple tables and you
# can specify the partition num for each table via 'table-list'
# Pika instance mode [classic | sharding]
instance-mode : classic
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT where
# dbid is a number between 0 and 'databases' - 1, limited in [1, 8]
databases : 1
# Table list
table-list : table1:1,table2:1
# Dump Prefix
dump-prefix :
# daemonize [yes | no]
daemonize : yes
# Dump Path
dump-path : {{path_current}}/dump/
# Expire-dump-days
dump-expire : 0
# pidfile Path
pidfile : {{path_current}}/pika.pid
# Max Connection
maxclients : 20000
# the per-file size of sst to compact, default is 2M
target-file-size-base : 20971520
# Expire-logs-days
expire-logs-days : 7
# Expire-logs-nums
expire-logs-nums : 10
# Root-connection-num
root-connection-num : 2
# Slowlog-write-errorlog
slowlog-write-errorlog : no
# Slowlog-log-slower-than
slowlog-log-slower-than : 10000
# Slowlog-max-len
slowlog-max-len : 128
# Pika db sync path
db-sync-path : {{path_current}}/dbsync/
# db sync speed (MB); max is 1024MB, min is 0; if below 0 or above 1024, the value is adjusted to 1024
db-sync-speed : -1
# The slave priority
slave-priority : 100
# network interface
#network-interface : eth1
# replication
#slaveof : master-ip:master-port
# CronTask, format 1: start-end/ratio, like 02-04/60, pika will check to schedule compaction between 2 to 4 o'clock everyday
# if the freesize/disksize > 60%.
# format 2: week/start-end/ratio, like 3/02-04/60, pika will check to schedule compaction between 2 to 4 o'clock
# every wednesday, if the freesize/disksize > 60%.
# NOTICE: if compact-interval is set, compact-cron is masked and disabled.
#
#compact-cron : 3/02-04/60
# Compact-interval, format: interval/ratio, like 6/60, pika will check to schedule compaction every 6 hours,
# if the freesize/disksize > 60%. NOTICE: compact-interval takes priority over compact-cron;
#compact-interval :
# server-id for hub
server-id : 1
###################
## Critical Settings
###################
# write_binlog [yes | no]
write-binlog : yes
# binlog file size: default is 100M, limited in [1K, 2G]
binlog-file-size : 104857600
# Automatically triggers a small compaction according to statistics
# Use the cache to store up to 'max-cache-statistic-keys' keys
# if 'max-cache-statistic-keys' is set to '0', the statistics function is turned off
# and small compactions are not triggered automatically
max-cache-statistic-keys : 0
# When a specific multi-data-structure key is deleted or overwritten 'small-compaction-threshold' times,
# a small compaction is triggered automatically; default is 5000, limited in [1, 100000]
small-compaction-threshold : 5000
# If the total size of all live memtables of all the DBs exceeds
# the limit, a flush will be triggered in the next DB to which the next write
# is issued.
max-write-buffer-size : 10737418240
# Compression
compression : snappy
# max-background-flushes: default is 1, limited in [1, 4]
max-background-flushes : 1
# max-background-compactions: default is 2, limited in [1, 8]
max-background-compactions : 2
# max-cache-files default is 5000
max-cache-files : 5000
# max_bytes_for_level_multiplier: default is 10, you can change it to 5
max-bytes-for-level-multiplier : 10
# BlockBasedTable block_size, default 4k
# block-size: 4096
# block LRU cache, default 8M, 0 to disable
# block-cache: 8388608
# whether the block cache is shared among the RocksDB instances, default is per CF
# share-block-cache: no
# whether or not index and filter blocks are stored in block cache
# cache-index-and-filter-blocks: no
# when set to yes, bloomfilter of the last level will not be built
# optimize-filters-for-hits: no
# https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#levels-target-size
# level-compaction-dynamic-level-bytes: no
8. The directory layout is as follows:
Top
|--->bin
|--->conf
|--->tool
pika.conf.template
run-app.sh