皮杜妮

  BlogJava :: 首页 :: 联系 :: 聚合  :: 管理
  887 Posts :: 3 Stories :: 7 Comments :: 0 Trackbacks

2013年11月23日 #

jfinal+freemarker+jquery mobile  开发出web应用, 然后找个壳子打包成apk应用。





http://www.cnblogs.com/gzggyy/archive/2013/05/14/3077510.html
posted @ 2016-09-25 17:54 猪眼睛| 编辑 收藏

可以使用AntiSamy防范XSS跨站脚本攻击



常用XSS方式分为以下几种:

1.      输入框中直接输入恶意脚本,如:

><script>alert(document.cookie)</script>

2.      输入框中输入html标签,在标签中嵌入恶意脚本,如src,href,css style等。

<IMG SRC="javascript:alert('XSS');">; <BODY BACKGROUND="javascript:alert('XSS')"> <STYLE>li {list-style-image:url("javascript:alert('XSS')");}</STYLE><UL><LI>XSS</br>

3.      将恶意脚本注入在event事件中,如onClick,onBlur,onMouseOver等事件。

<a onmouseover="alert(document.cookie)">xxslink</a>

4.      在remote style sheet,javascript中,如

<LINK REL="stylesheet"HREF="javascript:alert('XSS');">
posted @ 2016-07-08 11:44 猪眼睛| 编辑 收藏

ElasticSearch各个版本的apo
https://www.elastic.co/guide/en/elasticsearch/client/java-api/2.2/index.html


posted @ 2016-05-10 15:46 猪眼睛| 编辑 收藏


dubbo是阿里巴巴的框架,主要由4部分组成:1 服务提供方, 2 服务注册方, 3 服务消费方, 4 监控部分

1. 注册方一般用zookeeper, 先下载安装,启动 zkServer.cmd 会报错,需要修改配置文件 zoo.cfg,路径在 conf/ 下,没有就自己加一个。
正常启动

2. 编写一个服务器端, 创建maven项目

pom.xml文件

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>DubboService</groupId>
  <artifactId>DubboService</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <build/>
          <dependencies>
            <dependency>
                <groupId>junit</groupId>
                <artifactId>junit</artifactId>
                <version>3.8.1</version>
                <scope>test</scope>
            </dependency>
            <dependency>
                <groupId>commons-logging</groupId>
                <artifactId>commons-logging</artifactId>
                <version>1.1.1</version>
            </dependency>
            <dependency>
                <groupId>com.alibaba</groupId>
                <artifactId>dubbo</artifactId>
                <version>2.5.3</version>
            </dependency>
            <dependency>
                <groupId>org.javassist</groupId>
                <artifactId>javassist</artifactId>
                <version>3.18.1-GA</version>
            </dependency>
            <dependency>
                <groupId>log4j</groupId>
                <artifactId>log4j</artifactId>
                <version>1.2.15</version>
                <exclusions>
                    <exclusion>
                        <groupId>com.sun.jdmk</groupId>
                        <artifactId>jmxtools</artifactId>
                    </exclusion>
                    <exclusion>
                        <groupId>com.sun.jmx</groupId>
                        <artifactId>jmxri</artifactId>
                    </exclusion>
                    <exclusion>
                        <artifactId>jms</artifactId>
                        <groupId>javax.jms</groupId>
                    </exclusion>
                    <exclusion>
                        <artifactId>mail</artifactId>
                        <groupId>javax.mail</groupId>
                    </exclusion>
                </exclusions>
            </dependency>
            <dependency>
                <groupId>org.springframework</groupId>
                <artifactId>spring</artifactId>
                <version>2.5.6.SEC03</version>
            </dependency>
            <dependency>
                <groupId>org.slf4j</groupId>
                <artifactId>slf4j-api</artifactId>
                <version>1.7.6</version>
            </dependency>
            <dependency>
                <groupId>org.slf4j</groupId>
                <artifactId>slf4j-log4j12</artifactId>
                <version>1.6.1</version>
            </dependency>
            <dependency>
                <groupId>org.apache.zookeeper</groupId>
                <artifactId>zookeeper</artifactId>
                <version>3.4.5</version>
                <type>pom</type>
            </dependency>
            <dependency>
                <groupId>com.101tec</groupId>
                <artifactId>zkclient</artifactId>
                <version>0.4</version>
            </dependency>
        </dependencies>
      <repositories>
        <repository>
            <id>spring-snapshots</id>
            <url>http://repo.spring.io/libs-snapshot</url>
        </repository>
    </repositories>   
    
</project>

config/applicationProvider.xml  配置文件,里面定义了注册的bean, 和zookeeper的地址
<?xml version="1.0" encoding="UTF-8"?>
<beans
    xmlns="http://www.springframework.org/schema/beans"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:dubbo="http://code.alibabatech.com/schema/dubbo"    xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd http://code.alibabatech.com/schema/dubbo http://code.alibabatech.com/schema/dubbo/dubbo.xsd ">
    <dubbo:application name="hello-world" />
    <!-- 注册地址 -->
     <dubbo:registry address="zookeeper://localhost:2181" />
     <dubbo:protocol name="dubbo" port="20880" />
        <!-- Service interface Concurrent Control -->
        <dubbo:service interface="cn.zto.service.IProcessData"    ref="demoService" executes="10" />
        <!-- designate implementation -->
        <bean id="demoService" class="cn.zto.service.impl.ProcessDataImpl" />
    </beans>
IProcessData定义接口及实现类
package cn.zto.service.impl;

import cn.zto.service.IProcessData;
public class ProcessDataImpl implements IProcessData {
public String hello(String name) {
    System.out.println(name);
    return "hello : " + name;
}}

package cn.zto.service;

/** Remote service contract published through Dubbo. */
public interface IProcessData {

    /**
     * Produces a greeting for the supplied name.
     *
     * @param name name to greet
     * @return greeting text
     */
    String hello(String name);
}
启动服务
package cn.zto.app;

import org.springframework.context.support.ClassPathXmlApplicationContext;
public class Main {
public static void main(String[] args) throws Exception {
     ClassPathXmlApplicationContext context=new ClassPathXmlApplicationContext(    new String[] {
"config/applicationProvider.xml"
});
context.start();
System.out.println("按任意键退出");
System.in.read();
}}
运行起来。如下
log4j:WARN No appenders could be found for logger (org.springframework.context.support.ClassPathXmlApplicationContext).
log4j:WARN Please initialize the log4j system properly.
按任意键退出



下面再建一个client程序
pom.xml
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>DubboClient</groupId>
  <artifactId>DubboClient</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <build/>
 
          <dependencies>
            <dependency>
                <groupId>junit</groupId>
                <artifactId>junit</artifactId>
                <version>3.8.1</version>
                <scope>test</scope>
            </dependency>
            <dependency>
                <groupId>commons-logging</groupId>
                <artifactId>commons-logging</artifactId>
                <version>1.1.1</version>
            </dependency>
            <dependency>
                <groupId>com.alibaba</groupId>
                <artifactId>dubbo</artifactId>
                <version>2.5.3</version>
            </dependency>
            <dependency>
                <groupId>org.javassist</groupId>
                <artifactId>javassist</artifactId>
                <version>3.18.1-GA</version>
            </dependency>
            <dependency>
                <groupId>log4j</groupId>
                <artifactId>log4j</artifactId>
                <version>1.2.15</version>
                <exclusions>
                    <exclusion>
                        <groupId>com.sun.jdmk</groupId>
                        <artifactId>jmxtools</artifactId>
                    </exclusion>
                    <exclusion>
                        <groupId>com.sun.jmx</groupId>
                        <artifactId>jmxri</artifactId>
                    </exclusion>
                    <exclusion>
                        <artifactId>jms</artifactId>
                        <groupId>javax.jms</groupId>
                    </exclusion>
                    <exclusion>
                        <artifactId>mail</artifactId>
                        <groupId>javax.mail</groupId>
                    </exclusion>
                </exclusions>
            </dependency>
            <dependency>
                <groupId>org.springframework</groupId>
                <artifactId>spring</artifactId>
                <version>2.5.6.SEC03</version>
            </dependency>
            <dependency>
                <groupId>org.slf4j</groupId>
                <artifactId>slf4j-api</artifactId>
                <version>1.7.6</version>
            </dependency>
            <dependency>
                <groupId>org.slf4j</groupId>
                <artifactId>slf4j-log4j12</artifactId>
                <version>1.6.1</version>
            </dependency>
            <dependency>
                <groupId>org.apache.zookeeper</groupId>
                <artifactId>zookeeper</artifactId>
                <version>3.4.5</version>
                <type>pom</type>
            </dependency>
            <dependency>
                <groupId>com.101tec</groupId>
                <artifactId>zkclient</artifactId>
                <version>0.4</version>
            </dependency>
        </dependencies>
 
 
      <repositories>
        <repository>
            <id>spring-snapshots</id>
            <url>http://repo.spring.io/libs-snapshot</url>
        </repository>
    </repositories>   
    
</project>


注册的接口类,和要服务器端的包路径一致

package cn.zto.service;

/**
 * Consumer-side copy of the remote contract; package and signature must
 * match the provider's interface exactly.
 */
public interface IProcessData {

    /**
     * Produces a greeting for the supplied name (executed remotely).
     *
     * @param name name to greet
     * @return greeting text
     */
    String hello(String name);
}

客户端的配置文件,接口定义及zookeeper的地址
<?xml version="1.0" encoding="UTF-8"?>
<beans
    xmlns="http://www.springframework.org/schema/beans"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:dubbo="http://code.alibabatech.com/schema/dubbo"    xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd http://code.alibabatech.com/schema/dubbo http://code.alibabatech.com/schema/dubbo/dubbo.xsd ">
    <dubbo:application name="consumer-of-helloworld-app" />
    <!-- 注册地址 -->
     <dubbo:registry address="zookeeper://localhost:2181"/>
     <dubbo:consumer timeout="5000" />
     <dubbo:reference id="demoService" interface="cn.zto.service.IProcessData"/>
</beans>

客户端启动
package cn.zto.consumer;
import org.springframework.context.support.ClassPathXmlApplicationContext;

import cn.zto.service.IProcessData;

public class ConsumerThd{
    
    
    
public void sayHello(){
ClassPathXmlApplicationContext context=new ClassPathXmlApplicationContext(    
new String[] {"config/applicationProvider.xml"});
context.start();
IProcessData demoService=(IProcessData) context.getBean("demoService");
System.out.println(demoService.hello("world"));
}


public static void main(String args[]){
    new ConsumerThd().sayHello();
}



}


运行结果如下

log4j:WARN No appenders could be found for logger (org.springframework.context.support.ClassPathXmlApplicationContext).
log4j:WARN Please initialize the log4j system properly.
hello : world


这样就基本实现了dubbo的框架,很简单吧




posted @ 2016-05-04 17:03 猪眼睛| 编辑 收藏

spring boot 是spring 4.0提供的微框架,支持 jdk 1.8, maven 3以上,  否则会报一些错误。

1. pom 文件, 主要写依赖关系,
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.example</groupId>
    <artifactId>spring-boot</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>spring-boot-example</name>
    
    <parent>
       <groupId>org.springframework.boot</groupId>
       <artifactId>spring-boot-starter-parent</artifactId>
       <version>1.3.3.RELEASE</version>
    </parent>
    
    <!-- Add typical dependencies for a web application -->
    
    <dependencies>
        <dependency>
           <groupId>org.springframework.boot</groupId>
           <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        
        
        
    </dependencies>

    <repositories>
        <repository>
            <id>spring-snapshots</id>
            <url>http://repo.spring.io/libs-snapshot</url>
        </repository>
    </repositories>

    <pluginRepositories>
        <pluginRepository>
            <id>spring-snapshots</id>
            <url>http://repo.spring.io/libs-snapshot</url>
        </pluginRepository>
    </pluginRepositories>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>
</project>

2. 然后就是提供的接口类UserController, 实体类User, 主要是几个注解
@RestController   类同spring mvc 的@Controller
@RequestMapping   类同spring mvc

package two;

import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("/user")

public class UserController {

    @RequestMapping("/hello")
    public User view(){
        
        User user=new User();
        user.setId((long)100);
        user.setName("fanjs");
        return user;
        
        
    }

package two;

/** Simple DTO returned by UserController; serialized to JSON by Spring. */
public class User {

    private Long id;
    private String name;

    /** @return the user id, or null if unset */
    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    /** @return the display name, or null if unset */
    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }
} // fix: the original snippet was missing this closing brace


3.  然后就是程序启动,这里不需要spring xml文件,完全依赖注解。
@EnableAutoConfiguration
@Configuration
@ComponentScan

package two;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;

@Configuration
@ComponentScan
@EnableAutoConfiguration


public class RunMain {

    /**
     * @param args
     */
    public static void main(String[] args) {
        // TODO Auto-generated method stub

        SpringApplication.run(RunMain.class, args);
    }

}

4.  测试路径

http://localhost:8080/user/hello

{"id":100,"name":"fanjs"}






posted @ 2016-05-04 15:39 猪眼睛| 编辑 收藏

1、maven包查询:

   http://mvnrepository.com/
2、maven公共仓库

   http://mirrors.ibiblio.org/pub/mirrors/maven2/
   http://gradle.artifactoryonline.com/gradle/libs

   http://gradle.artifactoryonline.com/gradle/plugins
   http://google-maven-repository.googlecode.com/svn/repository
   http://maven.springframework.org/release
   http://maven.springframework.org/milestone
   http://repository.codehaus.org
   http://repository.jboss.org/nexus/content/groups/public-jboss
   http://repo.jfrog.org/artifactory/plugins-releases-local
   http://repo.jfrog.org/artifactory/libs-releases-local
   http://download.java.net/maven/1
   http://download.java.net/maven/2
   http://repo1.maven.org/maven2
   http://www.intalio.org/public/maven2/

   http://www.jarvana.com/jarvana/browse/

   http://maven.alfresco.com/nexus/content/groups/public

posted @ 2016-05-04 10:22 猪眼睛| 编辑 收藏

facet 自己理解就是分组聚合用的, 如下说明
 


http://blog.csdn.net/a925907195/article/details/47257243



Solr中的group与facet的区别

如果是简单的使用的话,那么Facet与group都可以用来进行数据的聚合查询,但是他们还是有很大的区别的。

首先上facet跟group的操作:

Facet的例子:

public voidFacetFieldQuery() throws Exception {

      solrServer = createSolrServer();

      SolrQueryquery = newSolrQuery();//建立一个新的查询

      query.setQuery("jobsName:计算机维护");

      query.setFacet(true);//设置facet=on

      // 分类信息分为:薪水,发布时间,教育背景,工作经验,公司类型,工作类型

      query.addFacetField(new String[] {"salary","publishDate",

            "educateBackground","jobExperience","companytype","jobsType" });//设置需要facet的字段

      query.setFacetLimit(10);// 限制facet返回的数量

      query.setFacetMissing(false);//不统计null的值

      query.setFacetMinCount(1);// 设置返回的数据中每个分组的数据最小值,比如设置为1,则统计数量最小为1,不然不显示

 

      //query.addFacetQuery("publishDate:[2014-04-11T00:00:00Z TO2014-04-13T00:00:00Z]");

      QueryResponseresponse = solrServer.query(query);

      System.out.println("查询时间:" + response.getQTime());

      List<FacetField>facets = response.getFacetFields();//返回的facet列表

      for (FacetField facet :facets) {

         System.out.println(facet.getName());

         System.out.println("----------------");

         List<Count>counts = facet.getValues();

         for (Count count : counts){

            System.out.println(count.getName()+":"+ count.getCount());

         }

         System.out.println();

      }

 

   }

运行结果如下:

查询时间:66

salary

----------------

面议:6882

2001-4000:1508

其他:671

4001-6000:536

3000-4499:224

2000-2999:181

6001-8000:179

3000-5000:82

1000-2000:81

4500-5999:75

 

publishDate

----------------

2014-08-05T00:00:00Z:793

2014-08-04T00:00:00Z:775

2014-07-30T00:00:00Z:601

2014-08-07T00:00:00Z:548

2014-08-06T00:00:00Z:539

2014-08-11T00:00:00Z:472

2014-08-20T00:00:00Z:439

2014-08-12T00:00:00Z:438

2014-08-01T00:00:00Z:405

2014-08-03T00:00:00Z:376

 

educateBackground

----------------

大专:4486

本科:1872

其他:1344

不限:1147

中专:680

高中:472

薪水范围::430

中技:161

初中:140

硕士:94

 

jobExperience

----------------

其他:2623

不限:2249

1-3年:1770

1年:1301

2年:773

3-4年:528

3-5年:379

应届毕业生:309

5-7年:162

1年以上:136

 

companytype

----------------

民营公司:3702

民营:2605

国企:835

股份制企业:729

其他:707

合资:632

外资(非欧美):377

外商独资:350

外资(欧美):271

上市公司:228

 

jobsType

----------------

全职:10734

兼职:59

实习:39

 

 

Group查询:

/**group查询

    * @throws Exception

    */

   public void GroupFieldQuery() throws Exception {

      solrServer = createSolrServer();

       SolrQuery query = new SolrQuery("jobsName:计算机维护");

        // 设置通过facet查询为true,表示查询时使用facet机制

        query.setParam(GroupParams.GROUP,true);   

        query.setParam(GroupParams.GROUP_FIELD,"salary");

        // 设置每个quality对应的

        query.setParam(GroupParams.GROUP_LIMIT,"1");

        // 设置返回doc文档数据,因只需要数量,故设置为0

        query.setRows(10);

        QueryResponse response = solrServer.query(query);

        if (response !=null) {

          GroupResponse groupResponse =response.getGroupResponse();   

            if(groupResponse !=null) {   

          List<GroupCommand> groupList =groupResponse.getValues();    

          for(GroupCommand groupCommand : groupList){   

              List<Group> groups =groupCommand.getValues();   

              for(Group group : groups) {

                System.out.println("group查询..."+group.getGroupValue()+"数量为:"+group.getResult().getNumFound());

              }   

          }   

            }   

        }

 

   }

group查询...面议数量为:6882

group查询...4500-5999数量为:75

group查询...2001-4000数量为:1508

group查询...其他数量为:671

group查询...2000-2999数量为:181

group查询...4001-6000数量为:536

group查询...2000-4000数量为:19

group查询...2000-3000数量为:34

group查询...3000-4499数量为:224

group查询...3000-5000数量为:82

 

facet的查询结果主要是分组信息:有什么分组,每个分组包括多少记录;但是分组中有哪些数据是不可知道的,只有进一步搜索。
group则类似于关系数据库的group by,可以用于一个或者几个字段去重、显示一个group的前几条记录等。

The Grouping feature only works if groups are inthe same shard. You must use the custom sharding feature to use the Groupingfeature.

 

两者其实用起来还是有比较大的区别的,但是如果说区别的话可以看下wiki上的这段

Field Collapsing and Result Grouping aredifferent ways to think about the same Solr feature.

Field Collapsing collapsesa group of results with the same field value down to a single (or fixed number)of entries. For example, most search engines such as Google collapse on site soonly one or two entries are shown, along with a link to click to see moreresults from that site. Field collapsing can also be used to suppress duplicatedocuments.

Result Grouping groupsdocuments with a common field value into groups, returning the top documentsper group, and the top groups based on what documents are in the groups. Oneexample is a search at Best Buy for a common term such as DVD, that shows thetop 3 results for each category ("TVs &Video","Movies","Computers", etc)

 

下面这两个查询语句一个是facet的一个是group的

http://localhost:8080/solr/JobsOtherWeb0/select?q=jobsName%3A%E8%AE%A1%E7%AE%97%E6%9C%BA%E7%BB%B4%E6%8A%A4&group=true&group.field=salary&group.limit=1&rows=10

http://localhost:8080/solr/JobsOtherWeb0/select?q=jobsName%3A%E8%AE%A1%E7%AE%97%E6%9C%BA%E7%BB%B4%E6%8A%A4&facet=true&facet.field=salary&facet.field=publishDate&facet.field=educateBackground&facet.field=jobExperience&facet.field=companytype&facet.field=jobsType&facet.limit=10&facet.missing=false&facet.mincount=1

 

其中facet查询出的如下:(只截取部分结果)




根据条件查询出的是查询结果,facet是聚类后的信息跟查询条件是分开的,查询结果也跟facet没关系。

但是下面看group查询的




也就是你的查询条件是跟group相关的,返回的查询结果也是跟group相关的,比如说你想要查询的结果在每个分组中 都有数据采集,那么就最好用group,这样出来的数据跟group也是相关的,但是有个问题,比如说你要查询group每个采集1个,ok那么你查询的 时候的条件rows就无效了(也不能说无效,主要是看你怎么使用),就是最多每个分组给你返回一个,多了没有了。

再细说点就是如果你想查询归查询聚类归聚类,那么使用facet,如果想使用类似采集的效果,每个group分组采集多少个,那么使用group查询。

posted @ 2016-05-03 16:34 猪眼睛| 编辑 收藏


[root@f2c node_work]# cat ServiceRoute.js
/*************************
 * Service routing endpoint: bridges HTTP requests to RabbitMQ (RPC style).
 * Request shape: ?<queueName>=<base64 payload>[&<param>=<jsonp callback>]
 * @author
 *
 **************************/
var http = require('http'),
        url = require('url'),
        amqplib = require('amqplib'),
        async = require('async'),
        uuid = require('node-uuid');

var open = require('amqplib').connect('amqp://10.0.16.101:5672');
http.createServer(function(req, res){
        /*** validate the query string ***/
        var param = url.parse(req.url).query;
        if(param==null || param=="") {
                res.writeHead(200, {'Content-Type':'text/html'});
                res.write("no message", 'utf8');
                res.end();
        }
        else {
                /*** parse parameters ***/
                console.log("*****************start*******************");
                var paramArr = param.split("&");
                var messageInfo = paramArr[0].split("=");
                var queueName = messageInfo[0];
                var b = new Buffer(messageInfo[1], 'base64');
                var mes = b.toString();
                console.log("*param="+new Date().toLocaleString());
                console.log("*param="+param);
                console.log("*request message = "+mes);
                var callBackInfo = null;
                if(paramArr.length>=2) callBackInfo = paramArr[1].split("=");
                /*** per-request reply queue (exclusive, auto-deleted) ***/
                var uuIdStr = uuid.v1()+"_a";
                var common_options = {durable: false, autoDelete:true, exclusive:true};
                /*** consume the reply ***/
                open.then(function(conn){
                        var  ok = conn.createChannel();
                        ok = ok.then(function(ch){
                                ch.assertQueue(uuIdStr, common_options);

                                var onSecond = 1000 * 1;
                                var timer=setTimeout(function(){
                                        console.log("*setTimeOut");
                                        res.write('{"s":-1, "error":"channel connect time out"}', 'utf8');
                                        // FIX: finish the HTTP response on timeout; the original
                                        // left the client connection hanging after the error body.
                                        res.end();
                                        ch.close();
                                }, onSecond);

                                ch.consume(uuIdStr, function(msg) {
                                        console.log("*response="+msg.content.toString());
                                        ch.close();
                                        clearTimeout(timer)
                                        /*** write the reply back to the HTTP client ***/
                                        if(callBackInfo!=null)
                                                // FIX: escape every single quote for the JSONP wrapper;
                                                // the original replace("'", "\'") was a no-op ("\'" === "'")
                                                // and only touched the first occurrence anyway.
                                                res.write(callBackInfo[1]+"('"+msg.content.toString().replace(/'/g, "\\'")+"')", 'utf8');
                                        else
                                                res.write(msg.content.toString(), 'utf8');
                                        res.end();
                                });
                                ch.on("error", function(err){
                                        console.log("*response error="+err);
                                });
                                ch.on("close", function(){
                                        console.log("*response close method is called");
                                });

                        });
                }).then(null, console.warn);
                /*** publish the request message ***/
                open.then(function(conn) {
                var ok = conn.createChannel();
                ok = ok.then(function(ch){
                                ch.sendToQueue(queueName, new Buffer(mes), {correlationId:uuIdStr});
                                ch.close();
                                ok.then(ok.close.bind(ok));
                        });
                }).then(null, console.warn);

        }
}).listen(8081);
posted @ 2014-01-06 10:57 猪眼睛| 编辑 收藏

做系统集成新的方式,主要是消息处理机制,采用通道的方式。



简单的配置文件
<beans:beans xmlns="http://www.springframework.org/schema/integration"
    xmlns:xsi
="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:beans
="http://www.springframework.org/schema/beans"
    xmlns:context
="http://www.springframework.org/schema/context"
    xsi:schemaLocation
="http://www.springframework.org/schema/beans
        http://www.springframework.org/schema/beans/spring-beans-2.5.xsd
        http://www.springframework.org/schema/integration
        http://www.springframework.org/schema/integration/spring-integration-1.0.xsd
        http://www.springframework.org/schema/context
        http://www.springframework.org/schema/context/spring-context-2.5.xsd"
>

    
<!-- 启动Message bus 消息服务总线 支持四个属性 
            auto-startup[boolean是否自动启动 default=true]如果设置false,则需要手动调用applicationContext.start()方法
            auto-create-channels[boolean是否自动注册MessageChannel default=false],如果使用的MessagChannle不存在
            error-channel 设置错误时信息发送的MessageChannle,如果不设置,则使用DefaultErrorChannel
            dispatcher-pool-size 使用的启动线程数,默认为10
-->
    
<message-bus/>
    
<!-- 启动支持元数据标记 -->
    
<annotation-driven/>
    
<!-- 设置 @Component标识的元数据扫描包(package) -->
    
<context:component-scan base-package="org.springframework.integration.samples.cafe"/>

        
<!-- 下面启动了四个 MessageChannel服务 处理接收发送端发过来的消息和把消息流转到消息的消费端 -->
        
<!-- 属性说明: capacity 消息最大容量默认为100 publish-subscribe是否是发布订阅模式,默认为否
                                        id bean的id名称 datatype ? 
-->
    
<channel id="orders"/> <!-- 订单Channel -->
    
<channel id="drinks"/> <!-- 饮料订单Channel,处理饮料的类别 -->
    
<channel id="coldDrinks"/> <!-- 热饮生产Channel -->
    
<channel id="hotDrinks"/> <!-- 冷饮生产Channel -->

        
<!-- 消息处理终端 接收 channel coldDrinks的消息后,执行barista.prepareColdDrink方法 生产冷饮 -->
        
<!-- 属性说明: input-channel 接收消息的Channel必须 default-output-channel设置默认回复消息Channel
                                        handler-ref 引用bean的id名称 handler-method Handler处理方法名(参数类型必须与发送消息的payLoad使用的一致)
                                        error-handler设置错误时信息发送的MessageChannle   reply-handler 消息回复的Channel 
-->
    
<endpoint input-channel="coldDrinks" handler-ref="barista"
                                         handler-method
="prepareColdDrink"/>

        
<!-- 消息处理终端 接收 channel hotDrinks的消息后,执行barista.prepareHotDrink方法 生产热饮 -->
    
<endpoint input-channel="hotDrinks" handler-ref="barista"
                                        handler-method
="prepareHotDrink"/>

        
<!-- 定义一个启动下定单操作的bean,它通过 channel orders下定单 -->
    
<beans:bean id="cafe" class="org.springframework.integration.samples.cafe.Cafe">
        
<beans:property name="orderChannel" ref="orders"/>
    
</beans:bean>
</beans:beans>
posted @ 2013-11-28 16:15 猪眼睛| 编辑 收藏

netty是一套高性能的通讯架构,这里我用netty实现http服务器实现信息采集功能。主要是利用他现有的handler处理器,解析出request头,做信息采集使用,重写了他自己的handler。


package io.netty.example.http.snoop;

import static io.netty.handler.codec.http.HttpHeaders.getHost;
import static io.netty.handler.codec.http.HttpHeaders.isKeepAlive;
import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH;
import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_TYPE;
import static io.netty.handler.codec.http.HttpHeaders.Names.COOKIE;
import static io.netty.handler.codec.http.HttpHeaders.Names.SET_COOKIE;
import static io.netty.handler.codec.http.HttpResponseStatus.BAD_REQUEST;
import static io.netty.handler.codec.http.HttpResponseStatus.OK;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.Cookie;
import io.netty.handler.codec.http.CookieDecoder;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpObject;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.codec.http.QueryStringDecoder;
import io.netty.handler.codec.http.ServerCookieEncoder;
import io.netty.util.CharsetUtil;

/**
 * Netty inbound handler that collects request metadata (host, URI, query
 * parameters) into a plain-text record, writes that record to a file via
 * WriterFile, and echoes the same text back as the HTTP response body.
 */
public class HttpSnoopServiceTxt extends SimpleChannelInboundHandler<Object> {

    // Most recent request head seen on this channel; consulted by writeResponse().
    private HttpRequest request;
    /** Buffer that stores the response content */
    private final StringBuilder buf = new StringBuilder();

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, Object msg)
            throws Exception {
        // One HTTP message arrives as an HttpRequest head followed by
        // HttpContent chunks, terminated by a LastHttpContent.
        if (msg instanceof HttpRequest) {
            HttpRequest request = this.request = (HttpRequest) msg;
            buf.setLength(0); // new request: reset the record buffer
            // hostname (falls back to "unknown" when no Host header is present)
            buf.append("HOSTNAME:").append(getHost(request, "unknown"));
            // url
            buf.append("REQUEST_URI:").append(request.getUri());
            // parm: decode the query string into key -> values
            QueryStringDecoder queryStringDecoder = new QueryStringDecoder(request.getUri());
            Map<String, List<String>> params = queryStringDecoder.parameters();
            if (!params.isEmpty()) {
                for (Entry<String, List<String>> p : params.entrySet()) {
                    String key = p.getKey();
                    List<String> vals = p.getValue();
                    for (String val : vals) {
                        buf.append("PARAM:").append(key).append("=")
                                .append(val);
                    }
                }
            }
            //cookie
            // NOTE(review): cookie extraction was never implemented — the
            // COOKIE/SET_COOKIE and CookieDecoder imports above are unused.
            
        }
        if (msg instanceof HttpContent) {
            if (msg instanceof LastHttpContent) {
                LastHttpContent trailer = (LastHttpContent) msg;
                writeResponse(trailer, ctx);
                // Persist the collected record to the text file.
                WriterFile.printtxt(buf.toString());
            }
        }
    }

    @Override
    public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
        ctx.flush();
    }

    /**
     * Writes the collected text back to the client as a full HTTP response.
     *
     * @param currentObj last decoded object; its decoder result selects OK vs BAD_REQUEST
     * @param ctx channel context the response is written into (flushed in channelReadComplete)
     * @return true when the connection is kept alive for further requests
     */
    private boolean writeResponse(HttpObject currentObj,ChannelHandlerContext ctx) {
        boolean keepAlive = isKeepAlive(request);
        FullHttpResponse response = new DefaultFullHttpResponse(HTTP_1_1,
                currentObj.getDecoderResult().isSuccess() ? OK : BAD_REQUEST,
                Unpooled.copiedBuffer(buf.toString(), CharsetUtil.UTF_8));
        response.headers().set(CONTENT_TYPE, "text/plain; charset=UTF-8");
        if (keepAlive) {
            // Keep-alive responses need an explicit Content-Length plus the header.
            response.headers().set(CONTENT_LENGTH,
                    response.content().readableBytes());
            response.headers().set(CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
        }
        ctx.write(response);
        return keepAlive;
    }

}


/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.example.http.snoop;

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

/**
 * An HTTP server that sends back the content of the received HTTP request
 * in a pretty plaintext form.
 */
public class HttpSnoopServer {

    private final int port;

    public HttpSnoopServer(int port) {
        this.port = port;
    }

    public void run() throws Exception {
        // Configure the server.
        EventLoopGroup bossGroup = new NioEventLoopGroup();
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
             .channel(NioServerSocketChannel.class)
             .childHandler(new HttpSnoopServerInitializer());

            Channel ch = b.bind(port).sync().channel();
            ch.closeFuture().sync();
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }

    public static void main(String[] args) throws Exception {
        int port;
        if (args.length > 0) {
            port = 8080;
        } else {
            port = 8080;
        }
        new HttpSnoopServer(port).run();
    }
}
/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.example.http.snoop;

import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;

public class HttpSnoopServerInitializer extends ChannelInitializer<SocketChannel> {
    @Override
    public void initChannel(SocketChannel ch) throws Exception {
        // Create a default pipeline implementation.
        ChannelPipeline p = ch.pipeline();

        // Uncomment the following line if you want HTTPS
        //SSLEngine engine = SecureChatSslContextFactory.getServerContext().createSSLEngine();
        //engine.setUseClientMode(false);
        //p.addLast("ssl", new SslHandler(engine));

        p.addLast("decoder", new HttpRequestDecoder());
        // Uncomment the following line if you don't want to handle HttpChunks.
        //p.addLast("aggregator", new HttpObjectAggregator(1048576));
        p.addLast("encoder", new HttpResponseEncoder());
        // Remove the following line if you don't want automatic content compression.
        //p.addLast("deflater", new HttpContentCompressor());
        
        p.addLast("handler", new HttpSnoopServiceTxt());
        //p.addLast("handler", new HttpSnoopServerHandler());
    }
}
posted @ 2013-11-23 15:54 猪眼睛| 编辑 收藏

电商系统需要记录用户行为,需要高并发、高速地写入文件。考虑利用缓存和NIO机制写入数据,具体逻辑是2块缓冲区,一块写数据,一块写文件,交替进行,并且利用NIO机制一次性写入数据。

测试结果: 1亿条数据用时93秒,生成58个100MB文件,平均每条953纳秒。

package io.netty.example.http.snoop;

import java.io.FileOutputStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CharsetEncoder;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;

/**
 * High-throughput log writer using two large buffers in alternation: while
 * one buffer accumulates entries, the other is flushed to disk via NIO, then
 * the roles swap.
 */
public class WriterFile {

    // Two ~100 MB buffers; exactly one is the active write target at a time.
    public static ByteBuffer bytebufferone = ByteBuffer.allocate(102400000);
    public static ByteBuffer bytebuffertwo = ByteBuffer.allocate(102400000);
    // true -> bytebufferone is active, false -> bytebuffertwo is active.
    // NOTE(review): no synchronization anywhere — this class appears to be
    // single-threaded; confirm before calling from multiple threads.
    public static boolean checkbuffer = true;

    /**
     * Benchmark entry point: pushes 100 million short log lines through the
     * double-buffer scheme and prints the elapsed time in nanoseconds.
     * Output files go to d:/test/ (see Datefile()).
     */
    public static void main(String[] args) {
        long start = System.nanoTime();
        for (int i = 0; i < 100000000; i++) {
            if (checkbuffer) {
                processone("123abc" + i + "\r\n");
            } else {
                prcesstwo("123abc" + i + "\r\n");
            }
        }
        long end = System.nanoTime();
        System.out.println((end - start) + "耗时");
    }

    /**
     * Appends a log entry to bytebuffertwo; when it cannot hold the entry,
     * switches the active buffer to bytebufferone (writing the entry there)
     * and flushes bytebuffertwo to disk.
     * (The misspelled name is kept for backward compatibility with callers.)
     */
    public static void prcesstwo(String log) {
        if (checkposition(log, bytebuffertwo)) {
            writerbuffer(log, bytebuffertwo);
        } else {
            checkbuffer = true;
            writerbuffer(log, bytebufferone);
            writerfile(bytebuffertwo);
        }
    }

    /**
     * Appends a log entry to bytebufferone; when it cannot hold the entry,
     * switches the active buffer to bytebuffertwo (writing the entry there)
     * and flushes bytebufferone to disk.
     */
    public static void processone(String log) {
        if (checkposition(log, bytebufferone)) {
            writerbuffer(log, bytebufferone);
        } else {
            checkbuffer = false;
            writerbuffer(log, bytebuffertwo);
            writerfile(bytebufferone);
        }
    }

    /**
     * Returns true if the buffer has room for the entry. writerbuffer()
     * stores each char via putChar (2 bytes per char), so exactly
     * 2 * log.length() bytes are needed; the original used
     * 2 * log.getBytes().length, which allocated a throwaway byte[] per call
     * and over-reserved for non-ASCII text.
     */
    public static boolean checkposition(String log, ByteBuffer bytebuffer) {
        return 2 * log.length() <= bytebuffer.limit() - bytebuffer.position();
    }

    /**
     * Writes the entry into the buffer one char at a time and returns the
     * buffer's new position.
     */
    public static int writerbuffer(String log, ByteBuffer bytebuffer) {
        for (int i = 0; i < log.length(); i++) {
            bytebuffer.putChar(log.charAt(i));
        }
        return bytebuffer.position();
    }

    /**
     * Flushes the given buffer to a timestamped file and clears it.
     * BUG FIX: the original did fc.write(bytebufferone) unconditionally,
     * ignoring the parameter — flushing bytebuffertwo wrote the wrong data.
     */
    public static void writerfile(ByteBuffer bytebuffer) {
        // try-with-resources closes the channel and stream even when the
        // write fails (the original leaked both on exception).
        try (FileOutputStream fos = new FileOutputStream(Datefile());
             FileChannel fc = fos.getChannel()) {
            // flip: position -> limit, 0 -> position, so write() emits only
            // the data actually accumulated.
            bytebuffer.flip();
            fc.write(bytebuffer);
        } catch (Exception ex) {
            ex.printStackTrace();
        } finally {
            // Reset in all cases so the buffer can be reused as a write target.
            bytebuffer.clear();
        }
    }

    /**
     * Builds a second-resolution timestamped file name under d:/test/.
     */
    public static String Datefile() {
        SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd_HHmmss");
        return "d:/test/" + format.format(new Date()) + ".txt";
    }
}


附带一个普通的NIO读写示例:
        public static  void test()
        {
            try{
            FileOutputStream fos = new FileOutputStream("d:/nio.txt");
            // 得到文件通道
            FileChannel fc = fos.getChannel();
            // 指定大小为 1024 的缓冲区
            ByteBuffer bf = ByteBuffer.allocate(1024);
            // 要写入文件的字符串
            String greeting = "Hello111";
            // 把以上字符串逐字放入缓冲区
            for (int i = 0; i < greeting.length(); i++) {
                bf.putChar(greeting.charAt(i));
            }
            // 记得执行这个方法,使得 position=0, limit=30, 才能写入正确的数据
            // 否则 position 为 30, limit 为 1024,将会把 30 之后的全部空数据(0) 填到文件中
            
            System.out.println(greeting.getBytes().length);
            System.out.println(bf.position());
            System.out.println(bf.limit());
            
            bf.flip();
            // 缓冲区数据写入到文件中,会把缓冲区中从 position 到 limit 之间的数据写入文件
            fc.write(bf);
            fc.close(); // 关闭文件通道
            fos.close(); // 关闭文件输出流
            }catch(Exception e){
             e.printStackTrace();
            
            }
        }
posted @ 2013-11-23 11:16 猪眼睛| 编辑 收藏