月度归档:2011年07月

HDFS的JAVA接口API操作实例(转)

转自:http://blog.csdn.net/wang382758656/article/details/5771332

@import url(http://www.blogjava.net/CuteSoft_Client/CuteEditor/Load.ashx?type=style&file=SyntaxHighlighter.css);@import url(/css/cuteeditor.css);

1.Copy a file from the local file system to HDFS
The srcFile variable needs to contain the full name (path + file name) of the file in the local file system. 
The dstFile variable needs to contain the desired full name of the file in the Hadoop file system.

Configuration config = new Configuration();

  FileSystem hdfs = FileSystem.get(config);

  Path srcPath = new Path(srcFile);

  Path dstPath = new Path(dstFile);

  hdfs.copyFromLocalFile(srcPath, dstPath);



2.Create HDFS file
The fileName variable contains the file name and path in the Hadoop file system. 
The content of the file is the buff variable which is an array of bytes.

//byte[] buff - The content of the file



  Configuration config = new Configuration();

  FileSystem hdfs = FileSystem.get(config);

  Path path = new Path(fileName);

  FSDataOutputStream outputStream = hdfs.create(path);

  outputStream.write(buff, 0, buff.length);


3.Rename HDFS file
In order to rename a file in Hadoop file system, we need the full name (path + name) of 
the file we want to rename. The rename method returns true if the file was renamed, otherwise false.

Configuration config = new Configuration();

  FileSystem hdfs = FileSystem.get(config);

  Path fromPath = new Path(fromFileName);

  Path toPath = new Path(toFileName);

  boolean isRenamed = hdfs.rename(fromPath, toPath);



4.Delete HDFS file
In order to delete a file in Hadoop file system, we need the full name (path + name) 
of the file we want to delete. The delete method returns true if the file was deleted, otherwise false.

Configuration config = new Configuration();

  FileSystem hdfs = FileSystem.get(config);

  Path path = new Path(fileName);

  boolean isDeleted = hdfs.delete(path, false);



Recursive delete:

  Configuration config = new Configuration();

  FileSystem hdfs = FileSystem.get(config);

  Path path = new Path(fileName);

  boolean isDeleted = hdfs.delete(path, true);


 
  
5.Get HDFS file last modification time
In order to get the last modification time of a file in Hadoop file system, 
we need the full name (path + name) of the file.

Configuration config = new Configuration();

  FileSystem hdfs = FileSystem.get(config);

  Path path = new Path(fileName);

  FileStatus fileStatus = hdfs.getFileStatus(path);

  long modificationTime = fileStatus.getModificationTime();


  
 6.Check if a file exists in HDFS
In order to check the existence of a file in Hadoop file system, 
we need the full name (path + name) of the file we want to check. 
The exists method returns true if the file exists, otherwise false.

Configuration config = new Configuration();

  FileSystem hdfs = FileSystem.get(config);

  Path path = new Path(fileName);

  boolean isExists = hdfs.exists(path);


  
 7.Get the locations of a file in the HDFS cluster
 A file can exist on more than one node in the Hadoop file system cluster for two reasons:
Based on the HDFS cluster configuration, Hadoop saves parts of files on different nodes in the cluster.
Based on the HDFS cluster configuration, Hadoop saves more than one copy of each file on different nodes for redundancy (The default is three).
 

Configuration config = new Configuration();

  FileSystem hdfs = FileSystem.get(config);

  Path path = new Path(fileName);

  FileStatus fileStatus = hdfs.getFileStatus(path);



  // BlockLocation[] blkLocations = hdfs.getFileBlockLocations(path, 0, fileStatus.getLen()); // wrong: see corrected line below

BlockLocation[] blkLocations = hdfs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());

     //这个地方,作者写错了,需要把path改为fileStatus

  int blkCount = blkLocations.length;

  for (int i=0; i < blkCount; i++) {

    String[] hosts = blkLocations[i].getHosts();

    // Do something with the block hosts

   }


8. Get a list of all the nodes host names in the HDFS cluster

  This method casts the FileSystem Object to a DistributedFileSystem Object. 
  This method will work only when Hadoop is configured as a cluster. 
  Running Hadoop on the local machine only, in a non cluster configuration will
   cause this method to throw an Exception.
   

Configuration config = new Configuration();

  FileSystem fs = FileSystem.get(config);

  DistributedFileSystem hdfs = (DistributedFileSystem) fs;

  DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();

  String[] names = new String[dataNodeStats.length];

  for (int i = 0; i < dataNodeStats.length; i++) {

      names[i] = dataNodeStats[i].getHostName();

  }


  
  
程序实例

/*

 * 

 * 演示操作HDFS的java接口

 * 

 * */






import java.io.EOFException;
import java.util.Arrays;
import java.util.Date;

import org.apache.hadoop.conf.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.hdfs.*;
import org.apache.hadoop.hdfs.protocol.*;



public class DFSOperater {



    /**

     * @param args

     */


    public static void main(String[] args) {



        Configuration conf = new Configuration();

        

        try {

            // Get a list of all the nodes host names in the HDFS cluster



            FileSystem fs = FileSystem.get(conf);

            DistributedFileSystem hdfs = (DistributedFileSystem)fs;

            DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();

            String[] names = new String[dataNodeStats.length];

            System.out.println("list of all the nodes in HDFS cluster:"); //print info



            for(int i=0; i < dataNodeStats.length; i++){

                names[i] = dataNodeStats[i].getHostName();

                System.out.println(names[i]); //print info



            }

            Path f = new Path("/user/cluster/dfs.txt");

            

            //check if a file exists in HDFS



            boolean isExists = fs.exists(f);

            System.out.println("The file exists? [" + isExists + "]");

            

            //if the file exist, delete it



            if(isExists){

                 boolean isDeleted = hdfs.delete(f, false);//fase : not recursive



                 if(isDeleted)System.out.println("now delete " + f.getName());                 

            }

            

            //create and write



            System.out.println("create and write [" + f.getName() + "] to hdfs:");

            FSDataOutputStream os = fs.create(f, true, 0);

            for(int i=0; i<10; i++){

                os.writeChars("test hdfs ");

            }

            os.writeChars("/n");

            os.close();

            

            //get the locations of a file in HDFS



            System.out.println("locations of file in HDFS:");

            FileStatus filestatus = fs.getFileStatus(f);

            BlockLocation[] blkLocations = fs.getFileBlockLocations(filestatus, 0,filestatus.getLen());

            int blkCount = blkLocations.length;

            for(int i=0; i < blkCount; i++){

                String[] hosts = blkLocations[i].getHosts();

                //Do sth with the block hosts



                System.out.println(hosts);

            }

            

            //get HDFS file last modification time



            long modificationTime = filestatus.getModificationTime(); // measured in milliseconds since the epoch



            Date d = new Date(modificationTime);

         System.out.println(d);

            //reading from HDFS



            System.out.println("read [" + f.getName() + "] from hdfs:");

     FSDataInputStream dis = fs.open(f);

     System.out.println(dis.readUTF());

     dis.close();



        } catch (Exception e) {

            // TODO: handle exception



            e.printStackTrace();

        }

                

    }



}


mysqld_multi 启动 mysql 多实例

@import url(http://www.blogjava.net/CuteSoft_Client/CuteEditor/Load.ashx?type=style&file=SyntaxHighlighter.css);@import url(/css/cuteeditor.css);

因为连接数的问题,我不得不在一台机器上多启 mysql instance
1. 建立两个 mysql 数据库实例
mysql_install_db --datadir=xxx
2. 配置 /etc/my.cnf
[client]
#password = your_password
#port = 3306
#socket = /var/lib/mysql/mysql.sock
# Here follows entries for some specific programs
[mysqld_multi] 
mysqld = /usr/bin/mysqld_safe 
mysqladmin = /usr/bin/mysqladmin 
user = mysql
password = mysql
[mysqld1]
datadir = /home/intple/mysql/data1
max_connections = 800
long_query_time = 0.1
#log-queries-not-using-indexes
slow_query_log = 1
slow_query_log_file = /var/log/mysql-slow.log
port = 3306
socket = /home/intple/mysql/data1/mysql.sock
skip-locking
key_buffer_size = 384M
max_allowed_packet = 1M
table_open_cache = 512
sort_buffer_size = 2M
read_buffer_size = 2M
read_rnd_buffer_size = 8M
myisam_sort_buffer_size = 64M
thread_cache_size = 32
query_cache_size = 512M
# Try number of CPU’s*2 for thread_concurrency
thread_concurrency = 32
log-bin=mysql-bin
server-id = 1
binlog_format=mixed
innodb_buffer_pool_size = 20G
innodb_additional_mem_pool_size = 32M
innodb_thread_concurrency = 32
# Set .._log_file_size to 25 % of buffer pool size
innodb_log_file_size = 256M
innodb_log_buffer_size = 32M
innodb_flush_log_at_trx_commit = 1
innodb_autoextend_increment = 64M
innodb_lock_wait_timeout = 200
[mysqld2]
datadir = /home/intple/mysql/data2
max_connections = 800
long_query_time = 0.1
#log-queries-not-using-indexes
slow_query_log = 1
slow_query_log_file = /var/log/mysql2-slow.log
port = 3307
socket = /home/intple/mysql/data2/mysql2.sock
skip-locking
key_buffer_size = 384M
max_allowed_packet = 1M
table_open_cache = 512
sort_buffer_size = 2M
read_buffer_size = 2M
read_rnd_buffer_size = 8M
myisam_sort_buffer_size = 64M
thread_cache_size = 32
query_cache_size = 512M
# Try number of CPU’s*2 for thread_concurrency
thread_concurrency = 32
log-bin=mysql-bin
server-id = 2  # must be unique per instance; original repeated 1 for both
binlog_format=mixed
innodb_buffer_pool_size = 20G
innodb_additional_mem_pool_size = 32M
innodb_thread_concurrency = 32
# Set .._log_file_size to 25 % of buffer pool size
innodb_log_file_size = 256M
innodb_log_buffer_size = 32M
innodb_flush_log_at_trx_commit = 1
innodb_autoextend_increment = 64M
innodb_lock_wait_timeout = 200
[mysqldump]
quick
max_allowed_packet = 16M
[mysql]
no-auto-rehash
# Remove the next comment character if you are not familiar with SQL
#safe-updates
[myisamchk]
key_buffer_size = 256M
sort_buffer_size = 256M
read_buffer = 2M
write_buffer = 2M
[mysqlhotcopy]
interactive-timeout
3. 启动
mysqld_multi start 1 &
mysqld_multi start 2 &