
ELK Enterprise Log Platform: Detailed Deployment + Monitoring Plugins

1. Operating system tuning:

Add to /etc/sysctl.conf:

vm.max_map_count=262144

Add to /etc/security/limits.conf:

*                soft   nofile          65536

*                hard   nofile          65536

*                soft   nproc           16384

*                hard   nproc           32768

Modify /etc/security/limits.d/90-nproc.conf as follows:

 

*          soft    nproc     2048

root       soft    nproc     unlimited
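To apply the sysctl change without a reboot and sanity-check the limits (the limits.conf values only apply to new login sessions):

sysctl -p                  # reload /etc/sysctl.conf
sysctl vm.max_map_count    # should print vm.max_map_count = 262144
ulimit -n                  # open-file limit, should report 65536 after re-login
ulimit -u                  # max user processes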

Add the hostnames to /etc/hosts:

10.1.14.39  test-20160224.novalocal

10.1.14.40  test-20160224-1.novalocal

10.1.14.41  test-20160224-2.novalocal

2. Download elasticsearch-5.5.3.tar.gz

tar -zxvf elasticsearch-5.5.3.tar.gz

mv elasticsearch-5.5.3 /home/htdocs/

Elasticsearch must be started as a non-root user, so create a webadmin user (useradd webadmin).

chown -R webadmin:webadmin /home/htdocs/elasticsearch-5.5.3

Edit the configuration file, config/elasticsearch.yml:

# ======================== Elasticsearch Configuration =========================

cluster.name: es-cluster

 

node.name: test-20160224.novalocal

#node.master: true

#node.data: true

 

path.data: /home/datas/es

path.logs: /home/logs/es

 

network.host: 10.1.14.39

http.port: 9200

transport.tcp.port: 9300

transport.tcp.compress: true

 

discovery.zen.ping.unicast.hosts: ["10.1.14.39:9300","10.1.14.40:9300","10.1.14.41:9300"]

discovery.zen.minimum_master_nodes: 1

#gateway.recover_after_nodes: 3

#action.destructive_requires_name: true

 

bootstrap.memory_lock: false

bootstrap.system_call_filter: false

 

http.cors.enabled: true

http.cors.allow-origin: "*"

http.cors.allow-headers: Authorization,Content-Type

 

script.engine.groovy.inline.search: on

script.engine.groovy.inline.aggs: on

 

#xpack.graph.enabled: false

#xpack.ml.enabled: false

#xpack.security.enabled: false

 

Create the data directories: mkdir -p /home/datas/es /home/logs/es; chown -R webadmin:webadmin /home/logs/es /home/datas/es

cd /home/htdocs/elasticsearch-5.5.3/bin

./elasticsearch -d  # start as the webadmin user, daemonized
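Once all three nodes are up, cluster health can be checked from any node; the status should be green with three nodes listed:

curl http://10.1.14.39:9200/_cluster/health?pretty
curl http://10.1.14.39:9200/_cat/nodes?v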

 

Copy the elasticsearch-5.5.3 directory to 10.1.14.40 and 10.1.14.41 with scp, then edit the configuration files:

elasticsearch.yml on 10.1.14.40:

 

# ======================== Elasticsearch Configuration =========================

cluster.name: es-cluster

 

node.name: test-20160224-1.novalocal

#node.master: true

#node.data: true

 

path.data: /home/datas/es

path.logs: /home/logs/es

 

network.host: 10.1.14.40

http.port: 9200

transport.tcp.port: 9300

transport.tcp.compress: true

 

discovery.zen.ping.unicast.hosts: ["10.1.14.39:9300","10.1.14.40:9300","10.1.14.41:9300"]

discovery.zen.minimum_master_nodes: 1

#gateway.recover_after_nodes: 3

#action.destructive_requires_name: true

 

bootstrap.memory_lock: false

bootstrap.system_call_filter: false

 

http.cors.enabled: true

http.cors.allow-origin: "*"

http.cors.allow-headers: Authorization,Content-Type

 

script.engine.groovy.inline.search: on

script.engine.groovy.inline.aggs: on

 

#xpack.graph.enabled: false

#xpack.ml.enabled: false

#xpack.security.enabled: false

 

elasticsearch.yml on 10.1.14.41:

# ======================== Elasticsearch Configuration =========================

cluster.name: es-cluster

 

node.name: test-20160224-2.novalocal

#node.master: true

#node.data: true

 

path.data: /home/datas/es

path.logs: /home/logs/es

 

network.host: 10.1.14.41

http.port: 9200

transport.tcp.port: 9300

transport.tcp.compress: true

 

discovery.zen.ping.unicast.hosts: ["10.1.14.39:9300","10.1.14.40:9300","10.1.14.41:9300"]

discovery.zen.minimum_master_nodes: 1

#gateway.recover_after_nodes: 3

#action.destructive_requires_name: true

 

bootstrap.memory_lock: false

bootstrap.system_call_filter: false

 

http.cors.enabled: true

http.cors.allow-origin: "*"

http.cors.allow-headers: Authorization,Content-Type

 

script.engine.groovy.inline.search: on

script.engine.groovy.inline.aggs: on

 

#xpack.graph.enabled: false

#xpack.ml.enabled: false

#xpack.security.enabled: false

 

Start:

cd /home/htdocs/elasticsearch-5.5.3/bin

./elasticsearch -d

 

3. Install the head monitoring plugin

Install Node.js:

wget https://nodejs.org/dist/v6.10.2/node-v6.10.2-linux-x64.tar.xz

xz -d node-v6.10.2-linux-x64.tar.xz

tar xvf node-v6.10.2-linux-x64.tar

mv node-v6.10.2-linux-x64 /usr/local/node

 

vim /etc/profile  # all the environment variables added so far, including the earlier JDK ones; the NODE_HOME lines at the end are the new additions

#java 1.8

JAVA_HOME=/usr/java/jdk1.8.0_131

 

CLASSPATH=.:$JAVA_HOME/lib/tools.jar

 

PATH=$JAVA_HOME/bin:$PATH

 

export JAVA_HOME CLASSPATH PATH

 

#set maven

MAVEN_HOME=/home/app/apache-maven-3.2.5

 

export MAVEN_HOME

 

export PATH=${PATH}:${MAVEN_HOME}/bin

##

export NODE_HOME=/usr/local/node

export PATH=$PATH:$NODE_HOME/bin

 

source /etc/profile

node -v   # verify Node.js is on the PATH

npm -v

Install the head plugin (the project must be cloned first; it lives at https://github.com/mobz/elasticsearch-head):

git clone https://github.com/mobz/elasticsearch-head.git

chown -R webadmin:webadmin elasticsearch-head

npm install -g grunt

npm install -g grunt-cli

cd elasticsearch-head

npm install

vi elasticsearch-head/_site/app.js and find the base_uri line (the original post showed the edit highlighted in red, which has not survived; see the sketch below):
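The usual change here is to point the default base_uri at an ES node instead of localhost, e.g.:

this.base_uri = this.config.base_uri || this.prefs.get("app-base_uri") || "http://10.1.14.39:9200";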

vi elasticsearch-head/Gruntfile.js and add the following (again the highlighted addition is lost; see the sketch below):
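The usual addition is hostname: '*' in the connect.server.options block, so head listens on all interfaces (a sketch; the other options are the project defaults):

connect: {
        server: {
                options: {
                        hostname: '*',
                        port: 9100,
                        base: '.',
                        keepalive: true
                }
        }
}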

 

Visit: http://10.1.14.39:9100/

After a reboot, start it with:

grunt server  &

Installing bigdesk:

wget http://yellowcong.qiniudn.com/bigdesk-master.zip

Unpack it, then point nginx at the directory so it can be served directly:

 

server {

listen       80;

server_name   10.1.14.39;

 

location / {

root   /home/htdocs/bigdesk;

index  index.html index.htm;

}

}

 

# Install the cerebro plugin

wget https://github.com/lmenezes/cerebro/releases/download/v0.6.5/cerebro-0.6.5.tgz

tar zxvf cerebro-0.6.5.tgz

cd cerebro-0.6.5/

bin/cerebro


ZooKeeper installation:

Version: zookeeper-3.4.10.tar.gz

Deployed on the same three nodes.

 

Unpack: tar -zxvf zookeeper-3.4.10.tar.gz

mv zookeeper-3.4.10 /usr/local/zookeeper

Edit /usr/local/zookeeper/conf/zoo.cfg:

 

# The number of milliseconds of each tick

 

tickTime=2000

 

# The number of ticks that the initial

 

# synchronization phase can take

 

initLimit=10

 

# The number of ticks that can pass between

 

# sending a request and getting an acknowledgement

 

syncLimit=5

 

# the directory where the snapshot is stored.

 

dataDir=/data/zk/zk0/data

 

dataLogDir=/data/zk/zk0/logs

 

# the port at which the clients will connect

 

clientPort=2181

 

server.0 = 10.1.14.39:2888:3888

 

server.1 = 10.1.14.40:2888:3888

 

server.2 = 10.1.14.41:2888:3888

 

autopurge.purgeInterval=1

 

Copy the zookeeper directory to 10.1.14.40 and 10.1.14.41.

On each of the three hosts, create a file named myid under the dataDir path; on 10.1.14.39, 10.1.14.40, and 10.1.14.41 its content is 0, 1, and 2 respectively.

For example, on 10.1.14.39:

cd /data/zk/zk0/data/

cat myid

0

On 10.1.14.40 the myid file contains 1, and on 10.1.14.41 it contains 2.
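A quick way to create them (run the echo with the matching value on each host):

mkdir -p /data/zk/zk0/data /data/zk/zk0/logs
echo 0 > /data/zk/zk0/data/myid    # 1 on 10.1.14.40, 2 on 10.1.14.41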

 

Start:

cd /usr/local/zookeeper/

bin/zkServer.sh start

Stop:

bin/zkServer.sh stop

Check status:

bin/zkServer.sh status

 

Note: by default ZooKeeper writes its console output to zookeeper.out in the directory it was started from, which we clearly cannot allow in production. The following changes make ZooKeeper write size-rotated log files instead:

In conf/log4j.properties, change zookeeper.root.logger=INFO, CONSOLE to zookeeper.root.logger=INFO, ROLLINGFILE.

In bin/zkEnv.sh, change ZOO_LOG4J_PROP="INFO,CONSOLE" to ZOO_LOG4J_PROP="INFO,ROLLINGFILE".

Then restart ZooKeeper and you are done.

 

4. Install Kafka: download kafka_2.12-1.0.0.tgz and unpack it.

mv kafka_2.12-1.0.0  /usr/local/

Enter the config directory and edit server.properties with the following content:

broker.id=0

listeners=PLAINTEXT://10.1.14.39:9092

num.network.threads=3

num.io.threads=8

socket.send.buffer.bytes=102400

socket.receive.buffer.bytes=102400

socket.request.max.bytes=104857600

log.dirs=/tmp/kafka-logs

num.partitions=1

num.recovery.threads.per.data.dir=1

offsets.topic.replication.factor=1

transaction.state.log.replication.factor=1

transaction.state.log.min.isr=1

log.retention.hours=168

log.segment.bytes=1073741824

log.retention.check.interval.ms=300000

zookeeper.connect=10.1.14.39:2181,10.1.14.40:2181,10.1.14.41:2181

zookeeper.connection.timeout.ms=6000

 

After editing, copy the directory to the other two hosts. In each copy change broker.id, and also point listeners at that host's own IP:

10.1.14.40   broker.id=1

10.1.14.41   broker.id=2

Start it on each of the three hosts:

./kafka-server-start.sh ../config/server.properties &

 

创建topic,

bin/kafka-topics.sh --create --zookeeper 10.1.14.39:2181,10.1.14.40:2181,10.1.14.41:2181 --replication-factor 3 --partitions 1 --topic my-replicated-topic

 

Producer:

bin/kafka-console-producer.sh --broker-list 10.1.14.39:9092,10.1.14.40:9092,10.1.14.41:9092 --topic my-replicated-topic

Type:

Hello kafka

 

Consumer:

bin/kafka-console-consumer.sh --bootstrap-server 10.1.14.39:9092,10.1.14.40:9092,10.1.14.41:9092 --from-beginning --topic my-replicated-topic

It will print: Hello kafka

 

# Create the nginx topic

./kafka-topics.sh --create --zookeeper 10.1.14.39:2181,10.1.14.40:2181,10.1.14.41:2181 --replication-factor 3 --partitions 3 --topic nginx-visitor-access-log
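The topic can be verified with --describe, which lists its partitions, leaders, and replicas:

./kafka-topics.sh --describe --zookeeper 10.1.14.39:2181 --topic nginx-visitor-access-log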

 

 

Install Filebeat:

curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-5.1.1-x86_64.rpm

sudo rpm -vi filebeat-5.1.1-x86_64.rpm

 

 

# Check the indices in ES:

curl '10.1.14.39:9200/_cat/indices?v'

 

# Configure Filebeat

/etc/filebeat/filebeat.yml

 

filebeat.prospectors:
- type: log
  enabled: true
  paths:
    - /home/logs/nginx/*.acc.log
  ignore_older: 24h
  fields:
    log_topic: nginx-visitor-access-log

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 3

setup.kibana:
  host: "10.1.14.41:5601"

output.kafka:
  enabled: true
  codec.format:
    string: '%{[message]}'
  hosts: ["10.1.14.39:9092","10.1.14.40:9092","10.1.14.41:9092"]
  topic: '%{[fields.log_topic]}'

 

/etc/init.d/filebeat start  # start
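To confirm nginx log lines are actually reaching Kafka before wiring up Logstash, point a console consumer at the topic (same tool as in the Kafka section):

bin/kafka-console-consumer.sh --bootstrap-server 10.1.14.39:9092 --from-beginning --topic nginx-visitor-access-log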

 

# Install Logstash:

Download logstash-6.1.2 from the official site, unpack it, and move it into place:

mv logstash-6.1.2 /usr/local/

 

Write a conf file that outputs to ES:

logstash_to_es.conf

 

input {
  kafka {
    bootstrap_servers => "10.1.14.39:9092,10.1.14.40:9092,10.1.14.41:9092"
    topics => ["nginx-visitor-access-log"]
    type => "nginx-visitor-access-log"
  }
}

filter {
  if [type] == "nginx-visitor-access-log" {
    grok {
      match => { "message" => "%{IPORHOST:remote_addr} - - \[%{HTTPDATE:time_local}\] \"%{WORD:method} %{URIPATHPARAM:request} HTTP/%{NUMBER:httpversion}\" %{INT:status} %{INT:body_bytes_sent} %{QS:http_referer} %{QS:http_user_agent}" }
    }
  } else {
    grok {
      patterns_dir => ["./patterns"]
    }
  }
}

output {
  elasticsearch {
    hosts => ["10.1.14.39:9200", "10.1.14.40:9200", "10.1.14.41:9200"]
    index => "nginx-visitor-access-log-%{+YYYY.MM.dd}"
    template_overwrite => true
  }
}

 

./logstash -f logstash_to_es.conf &  # start Logstash
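Tip: the pipeline syntax can be verified with -t (--config.test_and_exit) before running it in the background:

./logstash -f logstash_to_es.conf -t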

 

# Configure the nginx log format

log_format main '$remote_addr - $remote_user [$time_local] '
                '"$request" $status $body_bytes_sent '
                '"$http_referer" "$http_user_agent"';

 

 

# Configure Kibana

Download kibana-5.3.2-linux-x86_64 from the official site, unpack it, and enter the config directory.

Edit kibana.yml as follows:

server.port: 5601

server.host: "0.0.0.0"

elasticsearch.url: "http://10.1.14.39:9200"

kibana.index: ".kibana"

 

bin/kibana &  # start

 

# Access test: open Kibana in the browser

(screenshot: the Kibana UI)

GlusterFS Installation and Deployment

 

Prepare three machines running CentOS 7.

Configure /etc/hosts:

 

192.168.137.131 gluster1

192.168.137.132 gluster2

192.168.137.133 gluster3

 

It is best to disable the firewalls first; after deployment is complete, re-enable them and check the overall cluster status again. If it is still healthy, you are done.

 

Firewall rule to add: iptables -I INPUT -p tcp --dport 24007 -j ACCEPT

 

 

Install on all three nodes:

yum install centos-release-gluster -y

yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel

 

mkdir /opt/glusterd

 

sed -i 's/var\/lib/opt/g' /etc/glusterfs/glusterd.vol

systemctl start glusterd.service

systemctl enable glusterd.service

systemctl status glusterd.service

 

Create the storage directory:

mkdir /opt/gfs_data

 

 

Add the peer nodes. This only needs to be done on the first node; I used 192.168.137.131:

gluster peer probe gluster2

gluster peer probe gluster3

gluster peer status

 

Create the volume (for other volume types see http://www.cnblogs.com/jicki/p/5801712.html):

 

gluster volume create k8s-volume transport tcp gluster1:/opt/gfs_data gluster2:/opt/gfs_data gluster3:/opt/gfs_data force

For production, an 8-node stripe + replica layout is recommended.

gluster volume info

gluster volume quota k8s-volume limit-usage / 3GB

gluster volume set k8s-volume performance.cache-size 500MB

gluster volume set k8s-volume performance.io-thread-count 16

gluster volume set k8s-volume network.ping-timeout 10

gluster volume set k8s-volume performance.write-behind-window-size 200MB

gluster volume info

 

I tuned the parameters above for my own machines; another reference configuration is below:

# Enable quota on the given volume

$ gluster volume quota k8s-volume enable

# Set the quota limit for the given volume

$ gluster volume quota k8s-volume limit-usage / 1TB

# Set the cache size (default 32MB)

$ gluster volume set k8s-volume performance.cache-size 4GB

# Set the IO thread count (too many can crash the process)

$ gluster volume set k8s-volume performance.io-thread-count 16

# Set the network ping timeout (default 42s)

$ gluster volume set k8s-volume network.ping-timeout 10

# Set the write-behind window size (default 1MB)

$ gluster volume set k8s-volume performance.write-behind-window-size 1024MB

 

 

 

Client installation:

yum install -y glusterfs glusterfs-fuse

 

Configure /etc/hosts on the client as above, then mount:

mount -t glusterfs gluster1:k8s-volume /mnt/
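To verify the mount and make it persistent across reboots (a sketch; /mnt follows the mount command above):

df -h /mnt
echo 'gluster1:k8s-volume /mnt glusterfs defaults,_netdev 0 0' >> /etc/fstab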

 

Watch out for firewall issues; the firewall can be stopped with:

systemctl stop firewalld.service

 

Tencent Cloud COS Mount Problem

Mounting COS fails with the following errors:

cosfs: /usr/lib64/libstdc++.so.6: version `GLIBCXX_3.4.20' not found (required by cosfs)
cosfs: /usr/lib64/libstdc++.so.6: version `GLIBCXX_3.4.15' not found (required by cosfs)

Fix as follows:

strings /usr/lib64/libstdc++.so.6 | grep GLIBC

find / -name "libstdc++.so*"

cp /soft/gcc-4.9.4/gcc-bulild-4.9.4/x86_64-unknown-linux-gnu/libstdc++-v3/src/.libs/libstdc++.so.6.0.20 /usr/lib64/
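Copying the file alone is not enough: the libstdc++.so.6 symlink must also point at the newer library. A sketch, assuming the 6.0.20 file copied above:

cd /usr/lib64
ln -sf libstdc++.so.6.0.20 libstdc++.so.6
strings /usr/lib64/libstdc++.so.6 | grep GLIBCXX_3.4.20    # the symbol should now appear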

 

 

FastDFS Detailed Deployment

This was done a while ago; back-filling the documentation now:

Installation environment:

10.1.14.40

10.1.14.39

FastDFS and nginx are installed and running on both hosts:

Download the packages:

wget http://jaist.dl.sourceforge.net/project/fastdfs/FastDFS%20Nginx%20Module%20Source%20Code/fastdfs-nginx-module_v1.16.tar.gz

wget http://nginx.org/download/nginx-1.12.2.tar.gz

git clone https://github.com/happyfish100/libfastcommon.git

cd libfastcommon

./make.sh

./make.sh install

wget https://github.com/happyfish100/fastdfs/archive/V5.09.tar.gz

tar -zxvf V5.09.tar.gz

cd /usr/local/src/fastdfs-5.09/

./make.sh

./make.sh install

Edit the configuration files under /etc/fdfs/:

 

cp  tracker.conf.sample  tracker.conf

vi tracker.conf

disabled=false

base_path=/home/yuqing/fastdfs

vim storage.conf

disabled=false

group_name=group1

base_path=/home/yuqing/fastdfs

store_path0=/home/yuqing/fastdfs

tracker_server=10.1.14.39:22122

tracker_server=10.1.14.40:22122

Start:

/usr/bin/fdfs_trackerd /etc/fdfs/tracker.conf  restart

 

/usr/bin/fdfs_storaged /etc/fdfs/storage.conf  restart

 

Install nginx:

./configure --prefix=/usr/local/nginx --add-module=../fastdfs-nginx-module/src --with-pcre=../pcre-8.41

make   # this initially fails with the following error:

make[1]: *** [objs/addon/src/ngx_http_fastdfs_module.o] Error 1

make[1]: Leaving directory `/usr/local/src/nginx-1.13.6'

make: *** [build] Error 2

Fix by editing /usr/local/src/fastdfs-nginx-module/src/config as follows:

ngx_addon_name=ngx_http_fastdfs_module

HTTP_MODULES="$HTTP_MODULES ngx_http_fastdfs_module"

NGX_ADDON_SRCS="$NGX_ADDON_SRCS $ngx_addon_dir/ngx_http_fastdfs_module.c"

CORE_INCS="$CORE_INCS /usr/include/fastdfs /usr/include/fastcommon/"

CORE_LIBS="$CORE_LIBS -L/usr/local/lib -lfastcommon -lfdfsclient"

CFLAGS="$CFLAGS -D_FILE_OFFSET_BITS=64 -DFDFS_OUTPUT_CHUNK_SIZE='256*1024' -DFDFS_MOD_CONF_FILENAME='\"/etc/fdfs/mod_fastdfs.conf\"'"

 

make

make install

cd /usr/local/src/fastdfs-5.09/conf

cp anti-steal.jpg http.conf mime.types /etc/fdfs/

cd /usr/local/src/fastdfs-nginx-module/src

cp mod_fastdfs.conf  /etc/fdfs/

vi mod_fastdfs.conf

Add both tracker servers:

tracker_server=10.1.14.40:22122

tracker_server=10.1.14.39:22122

 

vim storage.conf

Set both tracker servers:

tracker_server=10.1.14.39:22122

tracker_server=10.1.14.40:22122

 

vi client.conf

base_path=/home/yuqing/fastdfs

tracker_server=10.1.14.40:22122

tracker_server=10.1.14.39:22122

 

 

nginx.conf configuration:

 

upstream fastdfs_tracker{

server 10.1.14.40:8080;

server 10.1.14.39:8080;

}

 

server {

listen       80;

server_name  10.1.14.40;

 

#charset koi8-r;

 

access_log  logs/host.access.log  main;

 

#location /group1/M00 {

#       root /home/yuqing/fastdfs/data/;

#       ngx_fastdfs_module;

#       }

 

location /group1/M00 {

proxy_next_upstream http_502 http_504 error timeout invalid_header;

#proxy_cache http-cache;

#proxy_cache_valid  200 304 12h;

#proxy_cache_key $uri$is_args$args;

proxy_pass  http://fastdfs_tracker;

#add_header Nginx-Cache "$upstream_cache_status";

#expires 30d;

}

 

#error_page  404              /404.html;

 

# redirect server error pages to the static page /50x.html

#

error_page   500 502 503 504  /50x.html;

location = /50x.html {

root   html;

}

 

}

 

server {

listen 8080;

server_name 10.1.14.40;

 

location / {

root html;

index index.html index.htm;

}

 

location ~/group[0-9]/M00 {

ngx_fastdfs_module;

}

}

 

tail -f /home/yuqing/fastdfs/logs/trackerd.log   # the tracker log

[2017-11-01 15:22:19] ERROR - file: tracker_mem.c, line: 3872, open file /home/yuqing/fastdfs/data/storage_groups_new.dat fail, errno: 2, error info: No such file or directory

[2017-11-01 15:22:19] INFO - file: tracker_mem.c, line: 4213, sys files loaded from tracker server 10.1.14.40:22122

[2017-11-01 15:22:20] ERROR - file: tracker_service.c, line: 2149, client ip: 10.1.14.39, sync src server: 10.1.14.40 not exists

[2017-11-01 15:22:20] INFO - file: tracker_service.c, line: 969, the tracker leader is 10.1.14.40:22122

The log shows errors; the fix is as follows:

After some debugging the problem was resolved: the data directories of the multiple trackers were inconsistent and were not synced automatically. Delete the /data directory of the newly added node and copy the /data directory from the current tracker leader over (do not copy the .pid file):

scp -r data 10.1.14.39:/home/xxxxx/

 

Then restart the newly added node, 10.1.14.39:

/usr/bin/fdfs_trackerd /etc/fdfs/tracker.conf restart

Check the log:

[2017-11-01 15:38:13] INFO - file: tracker_relationship.c, line: 383, selecting leader...

[2017-11-01 15:38:14] INFO - file: tracker_relationship.c, line: 422, the tracker leader 10.1.14.40:22122

 

No more errors.

 

(screenshot: tracker status on 10.1.14.39)

 

Uploads work from both hosts:

/usr/bin/fdfs_upload_file /etc/fdfs/client.conf /tmp/word.jpg
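On success fdfs_upload_file prints the file ID, which is also the URL path served by nginx; for example (illustrative, the hash part varies per upload):

group1/M00/00/00/CgEJEFqhB6qABYBRABjk_e3WFFQ333.jpg
# fetch it via http://10.1.14.40/group1/M00/00/00/CgEJEFqhB6qABYBRABjk_e3WFFQ333.jpg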

 

Installing the older stack (nginx 1.6.1 + FastDFS 4.06):

First install libevent-2.0.21-stable.tar.gz:

yum install gcc-c++

yum -y install zlib zlib-devel openssl openssl-devel pcre pcre-devel

 

./configure --prefix=/usr/

make

make install

 

Unpack FastDFS_v4.06.tar.gz and enter the directory:

vi make.sh and uncomment the line WITH_LINUX_SERVICE=1

 

./make.sh

./make.sh install

 

mkdir -p /home/yuqing/fastdfs

 

cd /usr/lib64/

ln -s /usr/lib/libevent-2.0.so.5 libevent-2.0.so.5

Startup scripts:

service fdfs_trackerd start

service fdfs_storaged start

service fdfs_storaged status

service fdfs_trackerd status

/usr/local/nginx/sbin/nginx

tar -zxvf fastdfs-nginx-module_v1.15.tar.gz

tar -zxvf pcre-8.41.tar.gz

tar -zxvf nginx-1.6.1.tar.gz

 

./configure --prefix=/usr/local/nginx --add-module=../fastdfs-nginx-module/src --with-pcre=../pcre-8.41

make

make install

nginx.conf:

#user  nobody;

worker_processes  1;

 

#error_log  logs/error.log;

#error_log  logs/error.log  notice;

#error_log  logs/error.log  info;

 

#pid        logs/nginx.pid;

events {

worker_connections  1024;

}

 

http {

include       mime.types;

default_type  application/octet-stream;

 

#log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '

#                  '$status $body_bytes_sent "$http_referer" '

#                  '"$http_user_agent" "$http_x_forwarded_for"';

 

#access_log  logs/access.log  main;

 

sendfile        on;

#tcp_nopush     on;

 

#keepalive_timeout  0;

keepalive_timeout  65;

 

#gzip  on;

 

upstream fastdfs_tracker{

server 10.1.9.14:8080;

server 10.1.9.16:8080;

}

 

server {

listen       80;

server_name  10.1.9.16;

 

#charset koi8-r;

 

#access_log  logs/host.access.log  main;

 

location / {

root   html;

index  index.html index.htm;

}

 

location /group1/M00 {

proxy_next_upstream http_502 http_504 error timeout invalid_header;

#proxy_cache http-cache;

#proxy_cache_valid  200 304 12h;

#proxy_cache_key $uri$is_args$args;

proxy_pass  http://fastdfs_tracker;

#add_header Nginx-Cache "$upstream_cache_status";

#expires 30d;

}

 

#error_page  404              /404.html;

 

# redirect server error pages to the static page /50x.html

#

error_page   500 502 503 504  /50x.html;

location = /50x.html {

root   html;

}

 

}

 

server {

listen 8080;

server_name 10.1.9.16;

 

location / {

root html;

index index.html index.htm;

}

 

location ~/group[0-9]/M00 {

ngx_fastdfs_module;

}

}

 

 

 

#error_page  404              /404.html;

 

# redirect server error pages to the static page /50x.html

#

#error_page   500 502 503 504  /50x.html;

#location = /50x.html {

#    root   html;

#}

 

 

}

 

This uses 10.1.9.16 as the example; the other host is 10.1.9.14, and only the IPs need to be changed.

The nginx log reports:

ERROR - file: ../common/fdfs_global.c, line: 52, the format of filename "group1/M00/00/00/CgEJEFqhB6qABYBRABjk_e3WFFQ333.jpg" is invalid

Solution: in /etc/fdfs/mod_fastdfs.conf, change

url_have_group_name=false

to

url_have_group_name=true

Restart nginx: /usr/local/nginx/sbin/nginx -s reload

Test:

Upload: /usr/local/src/FastDFS/client/fdfs_upload_file /etc/fdfs/client.conf /tmp/word.jpg

 

Access: (screenshot: the uploaded image opened in the browser)

 

Front-End Basics: CSS (Part 2)

In the last post we covered where CSS came from and how it is written, and walked through a basic example. This post continues with more examples of using CSS to dress up a page. CSS offers a very large number of style properties, and here I will show the most fundamental, most commonly used ones. Last time we used the div block element; a div can contain other tags, but styling the div the way we did styles everything inside the block. So how do we style just one tag inside a div, or select a div by its id or class? How do we style a form, which we meet everywhere, or a hyperlink? That is today's topic. As before, let's start with the basic HTML and a screenshot:

That is the basic HTML (the listing appeared as an image in the original post). Notice that I commented out the line <link rel="stylesheet" href="part2.css">; part2.css is the stylesheet we are about to write, and commenting the link out first lets us see the page without styles. Opened in Chrome it looks like this:

(screenshot css2-1: the page without styles)

Next we write the stylesheet. The tasks to complete:

1. Set a light blue background color for the page

2. Add a border around the h1 text and set the h2 text color

3. Set a background color for the p paragraph inside the div

4. Give the form a border with a specified width and height

5. Set the hyperlink color and some text decoration

That's about it; let's go straight to the code:
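The original listing was an image; below is a reconstruction of part2.css matching the five tasks. The exact colors and sizes are my own picks, and #itemone / .exit are the id and class selectors discussed below:

body { background-color: lightblue; }                 /* 1. page background */
h1 { border: 2px solid navy; }                        /* 2. border around h1 */
h2 { color: green; }                                  /* 2. h2 text color */
div p { background-color: white; }                    /* 3. p inside a div */
form { border: 1px solid gray; width: 300px; height: 180px; }   /* 4. form box */
a { color: purple; text-decoration: none; }           /* 5. link styling */
#itemone { font-size: 18px; }                         /* select a div by id */
.exit { color: red; }                                 /* select elements by class */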

That is the whole stylesheet. As you can see, to style the p tag inside a div, the selector is written as div p (with a space in between). Every div can also carry an id or a class, and we can select by those too: an id selector starts with #, like #itemone in the example, and a class selector starts with a dot, like .exit. With these selectors, styling a page becomes very flexible. That covers the basics; save the file, uncomment the stylesheet link, and refresh the browser to see the styles take effect:

(screenshot css2-2: the page with styles applied)

With this small example I trust you now have a grasp of basic CSS usage. In the next post we will look at the CSS box model, a key topic in learning CSS. See you then.