SlideShare a Scribd company logo
1 of 24
HBASE INTRODUCTION &
DEVELOPMENT
The term planet-size web application comes to mind, and in this case it is fitting
WHAT IS IT?
• It is the Hadoop database,
• Scalable
• Distributed
• BigDatastore
• Column Oriented
HBASE
HDFS
Reader
Writer
FEATURES OF HBASE
• Scalable.
• Automatic failover
• Consistent reads and writes.
• Sharding of tables
• Failover support
• Classes for backing Hadoop map reduce
jobs
• Java API for client access
• Thrift gateway and a REST Web
WHAT IT IS NOT
•No SQL support
•No relation
•No joins
•Not a replacement of RDBMS
• NoSQL
• HBase is a type of "NoSQL" database. "NoSQL" is a general term meaning that the
database isn't an RDBMS which supports SQL as its primary access language.
• When we should think of using it
• HBase isn't suitable for every problem. We should have a lot of data; if the data is small, an
RDBMS is better.
• Difference Between HDFS and HBase
• HDFS is a distributed file system that is well suited for the storage of large files. Its
documentation states that it is not, however, a general purpose file system, and does not
provide fast individual record lookups in files. HBase, on the other hand, is built on top of
HDFS and provides fast record lookups (and updates) for large tables.
THINK ON
THIS
• Facebook, for example, is adding more than 15 TB of data daily, and processing it
• Google adding Peta-Bytes of data and processing.
• Companies storing logs, temperature readings, and many other kinds of data
to store and process, which come in petabytes, for which conventional
technologies would take days just to read the data, let alone process it.
WHAT IS COLUMNS ORIENTED
MEANS
• Grouped by columns,
• The reason to store values on a per-column basis
instead is based on the assumption
• that, for specific queries, not all of the values are
needed.
• Reduced I/O
COMPONENTS
HMASTER
• Master server is responsible for
monitoring all RegionServer instances in
the cluster, and is the interface for all
metadata changes, it runs on the server
which hosts namenode.
• Master controls critical functions such as
RegionServer failover and completing
region splits. So while the cluster can still
run for a time without the Master, the
Master should be restarted as soon as
ZOOKEEP
ER
• Zookeeper is an open source software
providing a highly reliable, distributed
coordination service
• Entry point for an HBase system
• It includes tracking of region servers,
where the root region is hosted
API
• Interface to HBase
• Using these we can access HBase and perform
read/write and other operations on HBase.
• REST, Thrift, and Avro
• Thrift API framework, for scalable cross-language
services development, combines a software stack with a
code generation engine to build services that work
efficiently and seamlessly between C++, Java, Python,
PHP, Ruby, Erlang, Perl, Haskell, C#, Cocoa, JavaScript,
Node.js, Smalltalk, OCaml and Delphi and other
languages.
• lib
• commons-configuration-1.8.jar
• commons-lang-2.6.jar
• commons-logging-1.1.1.jar
• hadoop-core-1.0.0.jar
• hbase-0.92.1.jar
• log4j-1.2.16.jar
• slf4j-api-1.5.8.jar
• slf4j-log4j12-1.5.8.jar
• zookeeper-3.4.3.jar
• import org.apache.hadoop.conf.Configuration;
• import org.apache.hadoop.hbase.HBaseConfiguration;
• import org.apache.hadoop.hbase.HColumnDescriptor;
• import org.apache.hadoop.hbase.HTableDescriptor;
• import org.apache.hadoop.hbase.KeyValue;
• import org.apache.hadoop.hbase.MasterNotRunningException;
• import org.apache.hadoop.hbase.ZooKeeperConnectionException;
• import org.apache.hadoop.hbase.client.Delete;
• import org.apache.hadoop.hbase.client.Get;
• import org.apache.hadoop.hbase.client.HBaseAdmin;
• import org.apache.hadoop.hbase.client.HTable;
• import org.apache.hadoop.hbase.client.Result;
• import org.apache.hadoop.hbase.client.ResultScanner;
• import org.apache.hadoop.hbase.client.Scan;
• import org.apache.hadoop.hbase.client.Put;
• import org.apache.hadoop.hbase.util.Bytes;
// Build an HBase client configuration on top of an existing Hadoop Configuration,
// point it at the ZooKeeper quorum, and open a handle to the target table.
// NOTE(review): Constants, conf, hbaseZookeeperQuorum, hbaseZookeeperClientPort
// and tableName are assumed to be defined by the enclosing scope — confirm
// against the full source; this is a slide fragment, not a complete method.
Configuration hConf = HBaseConfiguration.create(conf);
hConf.set(Constants.HBASE_CONFIGURATION_ZOOKEEPER_QUORUM, hbaseZookeeperQuorum);
hConf.set(Constants.HBASE_CONFIGURATION_ZOOKEEPER_CLIENTPORT, hbaseZookeeperClientPort);
HTable hTable = new HTable(hConf, tableName);
• public class HBaseTest {
• private static Configuration conf = null;
• /**
• * Initialization
• */
• static {
• conf = HBaseConfiguration.create();
• }
• }
• /** Create a table
• */
• public static void creatTable(String tableName, String[] familys)
• throws Exception { HBaseAdmin admin = new
HBaseAdmin(conf);
• if (admin.tableExists(tableName)) { System.out.println("table
already exists!");
• } else { HTableDescriptor tableDesc = new
HTableDescriptor(tableName);
• for (int i = 0; i < familys.length; i++) {
• tableDesc.addFamily(new
HColumnDescriptor(familys[i]));
• }admin.createTable(tableDesc);
• System.out.println("create table " + tableName + " ok.");
• }
• /** * Delete a table
• */
• public static void deleteTable(String tableName) throws Exception {
• try {HBaseAdmin admin = new HBaseAdmin(conf);
• admin.disableTable(tableName);
• admin.deleteTable(tableName);
• System.out.println("delete table " + tableName + " ok.");
• } catch (MasterNotRunningException e) {
• e.printStackTrace();
• } catch (ZooKeeperConnectionException e) {
• e.printStackTrace();
• }
• }
• /**
• * Put (or insert) a row
• */
• public static void addRecord(String tableName, String rowKey,
String family, String qualifier, String value) throws Exception {
• try {HTable table = new HTable(conf, tableName);
• Put put = new Put(Bytes.toBytes(rowKey));
• put.add(Bytes.toBytes(family), Bytes.toBytes(qualifier),
Bytes
• .toBytes(value));
• table.put(put);
• System.out.println("insert recored " + rowKey + " to
table "
• + tableName + " ok.");
• } catch (IOException e) {e.printStackTrace();
• /**
• * Delete a row
• */
• public static void delRecord(String tableName, String rowKey)
• throws IOException {HTable table = new HTable(conf,
tableName);
• List<Delete> list = new ArrayList<Delete>();
• Delete del = new Delete(rowKey.getBytes());
• list.add(del);
• table.delete(list);
• System.out.println("del recored " + rowKey + " ok.");
• }
• /**Get a row
• */
• public static void getOneRecord (String tableName, String rowKey)
throws IOException{
• HTable table = new HTable(conf, tableName);
• Get get = new Get(rowKey.getBytes());
• Result rs = table.get(get);
• for(KeyValue kv : rs.raw()){
• System.out.print(new String(kv.getRow()) + " " );
• System.out.print(new String(kv.getFamily()) + ":" );
• System.out.print(new String(kv.getQualifier()) + " " );
• System.out.print(kv.getTimestamp() + " " );
• System.out.println(new String(kv.getValue()));
• }
• }
• /** Scan (or list) a table */
• public static void getAllRecord (String tableName) {
• try{
• HTable table = new HTable(conf, tableName);
• Scan s = new Scan();
• ResultScanner ss = table.getScanner(s);
• for(Result r:ss){
• for(KeyValue kv : r.raw()){
• System.out.print(new String(kv.getRow()) + " ");
• System.out.print(new String(kv.getFamily()) + ":"); System.out.print(new
String(kv.getQualifier()) + " ");
• System.out.print(kv.getTimestamp() + " "); System.out.println(new
String(kv.getValue())); } }
• } catch (IOException e){
• e.printStackTrace();
• }
• }
• public static void main(String[] agrs) {
• try {
• String tablename = "scores";
• String[] familys = { "grade", "course" };
• HBaseTest.creatTable(tablename, familys);
• // add record zkb
• HBaseTest.addRecord(tablename, "zkb", "grade", "", "5");
• HBaseTest.addRecord(tablename, "zkb", "course", "", "90");
• HBaseTest.addRecord(tablename, "zkb", "course", "math", "97");
• HBaseTest.addRecord(tablename, "zkb", "course", "art", "87");
• // add record baoniu
• HBaseTest.addRecord(tablename, "baoniu", "grade", "", "4");
• HBaseTest.addRecord(tablename, "baoniu", "course", "math", "89");
•
• System.out.println("===========get one record========");
• HBaseTest.getOneRecord(tablename, "zkb");
• System.out.println("===========show all record========");
• HBaseTest.getAllRecord(tablename);
• System.out.println("===========del one record========");
• HBaseTest.delRecord(tablename, "baoniu");
• HBaseTest.getAllRecord(tablename);
• System.out.println("===========show all record========");
• HBaseTest.getAllRecord(tablename);
• } catch (Exception e) {
• e.printStackTrace();
• }
• }}
Gmail dwivedishashwat@gmail.com
Twitter shashwat_2010
Facebook shriparv@gmail.com
Skype shriparv

More Related Content

What's hot

Hadoop secondary sort and a custom comparator
Hadoop secondary sort and a custom comparatorHadoop secondary sort and a custom comparator
Hadoop secondary sort and a custom comparator
Subhas Kumar Ghosh
 
Hw09 Hadoop Development At Facebook Hive And Hdfs
Hw09   Hadoop Development At Facebook  Hive And HdfsHw09   Hadoop Development At Facebook  Hive And Hdfs
Hw09 Hadoop Development At Facebook Hive And Hdfs
Cloudera, Inc.
 
06 how to write a map reduce version of k-means clustering
06 how to write a map reduce version of k-means clustering06 how to write a map reduce version of k-means clustering
06 how to write a map reduce version of k-means clustering
Subhas Kumar Ghosh
 
Hadoop deconstructing map reduce job step by step
Hadoop deconstructing map reduce job step by stepHadoop deconstructing map reduce job step by step
Hadoop deconstructing map reduce job step by step
Subhas Kumar Ghosh
 

What's hot (20)

04 pig data operations
04 pig data operations04 pig data operations
04 pig data operations
 
Map reduce in Hadoop
Map reduce in HadoopMap reduce in Hadoop
Map reduce in Hadoop
 
Repartition join in mapreduce
Repartition join in mapreduceRepartition join in mapreduce
Repartition join in mapreduce
 
Hadoop Mapreduce joins
Hadoop Mapreduce joinsHadoop Mapreduce joins
Hadoop Mapreduce joins
 
Hadoop MapReduce framework - Module 3
Hadoop MapReduce framework - Module 3Hadoop MapReduce framework - Module 3
Hadoop MapReduce framework - Module 3
 
Hive
HiveHive
Hive
 
Computing Scientometrics in Large-Scale Academic Search Engines with MapReduce
Computing Scientometrics in Large-Scale Academic Search Engines with MapReduceComputing Scientometrics in Large-Scale Academic Search Engines with MapReduce
Computing Scientometrics in Large-Scale Academic Search Engines with MapReduce
 
Import web resources using R Studio
Import web resources using R StudioImport web resources using R Studio
Import web resources using R Studio
 
Hadoop secondary sort and a custom comparator
Hadoop secondary sort and a custom comparatorHadoop secondary sort and a custom comparator
Hadoop secondary sort and a custom comparator
 
Hw09 Hadoop Development At Facebook Hive And Hdfs
Hw09   Hadoop Development At Facebook  Hive And HdfsHw09   Hadoop Development At Facebook  Hive And Hdfs
Hw09 Hadoop Development At Facebook Hive And Hdfs
 
Upgrading To The New Map Reduce API
Upgrading To The New Map Reduce APIUpgrading To The New Map Reduce API
Upgrading To The New Map Reduce API
 
Map Reduce Execution Architecture
Map Reduce Execution Architecture Map Reduce Execution Architecture
Map Reduce Execution Architecture
 
Hadoop job chaining
Hadoop job chainingHadoop job chaining
Hadoop job chaining
 
06 how to write a map reduce version of k-means clustering
06 how to write a map reduce version of k-means clustering06 how to write a map reduce version of k-means clustering
06 how to write a map reduce version of k-means clustering
 
ACADILD:: HADOOP LESSON
ACADILD:: HADOOP LESSON ACADILD:: HADOOP LESSON
ACADILD:: HADOOP LESSON
 
report on aadhaar anlysis using bid data hadoop and hive
report on aadhaar anlysis using bid data hadoop and hivereport on aadhaar anlysis using bid data hadoop and hive
report on aadhaar anlysis using bid data hadoop and hive
 
Hadoop deconstructing map reduce job step by step
Hadoop deconstructing map reduce job step by stepHadoop deconstructing map reduce job step by step
Hadoop deconstructing map reduce job step by step
 
Hadoop combiner and partitioner
Hadoop combiner and partitionerHadoop combiner and partitioner
Hadoop combiner and partitioner
 
Hive User Meeting August 2009 Facebook
Hive User Meeting August 2009 FacebookHive User Meeting August 2009 Facebook
Hive User Meeting August 2009 Facebook
 
Apache Scoop - Import with Append mode and Last Modified mode
Apache Scoop - Import with Append mode and Last Modified mode Apache Scoop - Import with Append mode and Last Modified mode
Apache Scoop - Import with Append mode and Last Modified mode
 

Similar to H base introduction & development

Data Processing with Cascading Java API on Apache Hadoop
Data Processing with Cascading Java API on Apache HadoopData Processing with Cascading Java API on Apache Hadoop
Data Processing with Cascading Java API on Apache Hadoop
Hikmat Dhamee
 
Hadoop 20111117
Hadoop 20111117Hadoop 20111117
Hadoop 20111117
exsuns
 

Similar to H base introduction & development (20)

Nov. 4, 2011 o reilly webcast-hbase- lars george
Nov. 4, 2011 o reilly webcast-hbase- lars georgeNov. 4, 2011 o reilly webcast-hbase- lars george
Nov. 4, 2011 o reilly webcast-hbase- lars george
 
Intro to HBase - Lars George
Intro to HBase - Lars GeorgeIntro to HBase - Lars George
Intro to HBase - Lars George
 
מיכאל
מיכאלמיכאל
מיכאל
 
HBaseCon 2012 | HBase Coprocessors – Deploy Shared Functionality Directly on ...
HBaseCon 2012 | HBase Coprocessors – Deploy Shared Functionality Directly on ...HBaseCon 2012 | HBase Coprocessors – Deploy Shared Functionality Directly on ...
HBaseCon 2012 | HBase Coprocessors – Deploy Shared Functionality Directly on ...
 
HBase and Hadoop at Urban Airship
HBase and Hadoop at Urban AirshipHBase and Hadoop at Urban Airship
HBase and Hadoop at Urban Airship
 
6.hive
6.hive6.hive
6.hive
 
Hadoop - Apache Hbase
Hadoop - Apache HbaseHadoop - Apache Hbase
Hadoop - Apache Hbase
 
Hive @ Bucharest Java User Group
Hive @ Bucharest Java User GroupHive @ Bucharest Java User Group
Hive @ Bucharest Java User Group
 
HBaseConEast2016: HBase and Spark, State of the Art
HBaseConEast2016: HBase and Spark, State of the ArtHBaseConEast2016: HBase and Spark, State of the Art
HBaseConEast2016: HBase and Spark, State of the Art
 
HBase.pptx
HBase.pptxHBase.pptx
HBase.pptx
 
Data Processing with Cascading Java API on Apache Hadoop
Data Processing with Cascading Java API on Apache HadoopData Processing with Cascading Java API on Apache Hadoop
Data Processing with Cascading Java API on Apache Hadoop
 
HBase.pptx
HBase.pptxHBase.pptx
HBase.pptx
 
Yahoo! Hack Europe Workshop
Yahoo! Hack Europe WorkshopYahoo! Hack Europe Workshop
Yahoo! Hack Europe Workshop
 
Tajo Seoul Meetup July 2015 - What's New Tajo 0.11
Tajo Seoul Meetup July 2015 - What's New Tajo 0.11Tajo Seoul Meetup July 2015 - What's New Tajo 0.11
Tajo Seoul Meetup July 2015 - What's New Tajo 0.11
 
Apache HBase 1.0 Release
Apache HBase 1.0 ReleaseApache HBase 1.0 Release
Apache HBase 1.0 Release
 
HBase lon meetup
HBase lon meetupHBase lon meetup
HBase lon meetup
 
Hadoop 20111117
Hadoop 20111117Hadoop 20111117
Hadoop 20111117
 
What's New Tajo 0.10 and Its Beyond
What's New Tajo 0.10 and Its BeyondWhat's New Tajo 0.10 and Its Beyond
What's New Tajo 0.10 and Its Beyond
 
Building Google-in-a-box: using Apache SolrCloud and Bigtop to index your big...
Building Google-in-a-box: using Apache SolrCloud and Bigtop to index your big...Building Google-in-a-box: using Apache SolrCloud and Bigtop to index your big...
Building Google-in-a-box: using Apache SolrCloud and Bigtop to index your big...
 
03 hive query language (hql)
03 hive query language (hql)03 hive query language (hql)
03 hive query language (hql)
 

More from Shashwat Shriparv

LibreOffice 7.3.pptx
LibreOffice 7.3.pptxLibreOffice 7.3.pptx
LibreOffice 7.3.pptx
Shashwat Shriparv
 

More from Shashwat Shriparv (20)

Learning Linux Series Administrator Commands.pptx
Learning Linux Series Administrator Commands.pptxLearning Linux Series Administrator Commands.pptx
Learning Linux Series Administrator Commands.pptx
 
LibreOffice 7.3.pptx
LibreOffice 7.3.pptxLibreOffice 7.3.pptx
LibreOffice 7.3.pptx
 
Kerberos Architecture.pptx
Kerberos Architecture.pptxKerberos Architecture.pptx
Kerberos Architecture.pptx
 
Suspending a Process in Linux.pptx
Suspending a Process in Linux.pptxSuspending a Process in Linux.pptx
Suspending a Process in Linux.pptx
 
Kerberos Architecture.pptx
Kerberos Architecture.pptxKerberos Architecture.pptx
Kerberos Architecture.pptx
 
Command Seperators.pptx
Command Seperators.pptxCommand Seperators.pptx
Command Seperators.pptx
 
Upgrading hadoop
Upgrading hadoopUpgrading hadoop
Upgrading hadoop
 
Hadoop migration and upgradation
Hadoop migration and upgradationHadoop migration and upgradation
Hadoop migration and upgradation
 
R language introduction
R language introductionR language introduction
R language introduction
 
Hbase interact with shell
Hbase interact with shellHbase interact with shell
Hbase interact with shell
 
H base development
H base developmentH base development
H base development
 
Hbase
HbaseHbase
Hbase
 
H base
H baseH base
H base
 
My sql
My sqlMy sql
My sql
 
Apache tomcat
Apache tomcatApache tomcat
Apache tomcat
 
Linux 4 you
Linux 4 youLinux 4 you
Linux 4 you
 
Introduction to apache hadoop
Introduction to apache hadoopIntroduction to apache hadoop
Introduction to apache hadoop
 
Next generation technology
Next generation technologyNext generation technology
Next generation technology
 
Configure h base hadoop and hbase client
Configure h base hadoop and hbase clientConfigure h base hadoop and hbase client
Configure h base hadoop and hbase client
 
Java interview questions
Java interview questionsJava interview questions
Java interview questions
 

Recently uploaded

Recently uploaded (20)

Understanding Discord NSFW Servers A Guide for Responsible Users.pdf
Understanding Discord NSFW Servers A Guide for Responsible Users.pdfUnderstanding Discord NSFW Servers A Guide for Responsible Users.pdf
Understanding Discord NSFW Servers A Guide for Responsible Users.pdf
 
Strategize a Smooth Tenant-to-tenant Migration and Copilot Takeoff
Strategize a Smooth Tenant-to-tenant Migration and Copilot TakeoffStrategize a Smooth Tenant-to-tenant Migration and Copilot Takeoff
Strategize a Smooth Tenant-to-tenant Migration and Copilot Takeoff
 
ProductAnonymous-April2024-WinProductDiscovery-MelissaKlemke
ProductAnonymous-April2024-WinProductDiscovery-MelissaKlemkeProductAnonymous-April2024-WinProductDiscovery-MelissaKlemke
ProductAnonymous-April2024-WinProductDiscovery-MelissaKlemke
 
Strategies for Unlocking Knowledge Management in Microsoft 365 in the Copilot...
Strategies for Unlocking Knowledge Management in Microsoft 365 in the Copilot...Strategies for Unlocking Knowledge Management in Microsoft 365 in the Copilot...
Strategies for Unlocking Knowledge Management in Microsoft 365 in the Copilot...
 
A Domino Admins Adventures (Engage 2024)
A Domino Admins Adventures (Engage 2024)A Domino Admins Adventures (Engage 2024)
A Domino Admins Adventures (Engage 2024)
 
Apidays New York 2024 - The value of a flexible API Management solution for O...
Apidays New York 2024 - The value of a flexible API Management solution for O...Apidays New York 2024 - The value of a flexible API Management solution for O...
Apidays New York 2024 - The value of a flexible API Management solution for O...
 
Top 10 Most Downloaded Games on Play Store in 2024
Top 10 Most Downloaded Games on Play Store in 2024Top 10 Most Downloaded Games on Play Store in 2024
Top 10 Most Downloaded Games on Play Store in 2024
 
Top 5 Benefits OF Using Muvi Live Paywall For Live Streams
Top 5 Benefits OF Using Muvi Live Paywall For Live StreamsTop 5 Benefits OF Using Muvi Live Paywall For Live Streams
Top 5 Benefits OF Using Muvi Live Paywall For Live Streams
 
Scaling API-first – The story of a global engineering organization
Scaling API-first – The story of a global engineering organizationScaling API-first – The story of a global engineering organization
Scaling API-first – The story of a global engineering organization
 
Mastering MySQL Database Architecture: Deep Dive into MySQL Shell and MySQL R...
Mastering MySQL Database Architecture: Deep Dive into MySQL Shell and MySQL R...Mastering MySQL Database Architecture: Deep Dive into MySQL Shell and MySQL R...
Mastering MySQL Database Architecture: Deep Dive into MySQL Shell and MySQL R...
 
Apidays New York 2024 - Scaling API-first by Ian Reasor and Radu Cotescu, Adobe
Apidays New York 2024 - Scaling API-first by Ian Reasor and Radu Cotescu, AdobeApidays New York 2024 - Scaling API-first by Ian Reasor and Radu Cotescu, Adobe
Apidays New York 2024 - Scaling API-first by Ian Reasor and Radu Cotescu, Adobe
 
Real Time Object Detection Using Open CV
Real Time Object Detection Using Open CVReal Time Object Detection Using Open CV
Real Time Object Detection Using Open CV
 
Apidays Singapore 2024 - Building Digital Trust in a Digital Economy by Veron...
Apidays Singapore 2024 - Building Digital Trust in a Digital Economy by Veron...Apidays Singapore 2024 - Building Digital Trust in a Digital Economy by Veron...
Apidays Singapore 2024 - Building Digital Trust in a Digital Economy by Veron...
 
Tata AIG General Insurance Company - Insurer Innovation Award 2024
Tata AIG General Insurance Company - Insurer Innovation Award 2024Tata AIG General Insurance Company - Insurer Innovation Award 2024
Tata AIG General Insurance Company - Insurer Innovation Award 2024
 
HTML Injection Attacks: Impact and Mitigation Strategies
HTML Injection Attacks: Impact and Mitigation StrategiesHTML Injection Attacks: Impact and Mitigation Strategies
HTML Injection Attacks: Impact and Mitigation Strategies
 
Data Cloud, More than a CDP by Matt Robison
Data Cloud, More than a CDP by Matt RobisonData Cloud, More than a CDP by Matt Robison
Data Cloud, More than a CDP by Matt Robison
 
Automating Google Workspace (GWS) & more with Apps Script
Automating Google Workspace (GWS) & more with Apps ScriptAutomating Google Workspace (GWS) & more with Apps Script
Automating Google Workspace (GWS) & more with Apps Script
 
GenAI Risks & Security Meetup 01052024.pdf
GenAI Risks & Security Meetup 01052024.pdfGenAI Risks & Security Meetup 01052024.pdf
GenAI Risks & Security Meetup 01052024.pdf
 
Strategies for Landing an Oracle DBA Job as a Fresher
Strategies for Landing an Oracle DBA Job as a FresherStrategies for Landing an Oracle DBA Job as a Fresher
Strategies for Landing an Oracle DBA Job as a Fresher
 
Bajaj Allianz Life Insurance Company - Insurer Innovation Award 2024
Bajaj Allianz Life Insurance Company - Insurer Innovation Award 2024Bajaj Allianz Life Insurance Company - Insurer Innovation Award 2024
Bajaj Allianz Life Insurance Company - Insurer Innovation Award 2024
 

H base introduction & development

  • 1. HBASE INTRODUCTION & DEVELOPMENT The term planet-size web application comes to mind, and in this case it is fitting
  • 2. WHAT IS IT? • It is the Hadoop database, • Sclable • Distributed • BigDatastore • Column Oriented HBASE HDFS Reader Writer
  • 3. FEATURES OF HBASE • Scalable. • Automatic failover • Consistent reads and writes. • Sharding of tables • Failover support • Classes for backing Hadoop map reduce jobs • Java API for client access • Thrift gateway and a REST Web
  • 4. WHAT IT IS NOT •No-Sql •No relation •No joins •Not a replacement of RDBMS
  • 5. • NoSQL • HBase is a type of "NoSQL" database. "NoSQL" is a general term meaning that the database isn't an RDBMS which supports SQL as its primary access language. • When we should think of using it • HBase isn't suitable for every problem. We should have lot of data, if data is less RDBMS is better. • Difference Between HDFS and HBase • HDFS is a distributed file system that is well suited for the storage of large files. It's documentation states that it is not, however, a general purpose file system, and does not provide fast individual record lookups in files. HBase, on the other hand, is built on top of HDFS and provides fast record lookups (and updates) for large tables.
  • 6. THINK ON THIS • Facebook, for example, is adding more than 15 TB, and processing daily • Google adding Peta-Bytes of data and processing. • Companies storing Logs, temperature details, and many other prospective to store and process, which come in Peta-byte for which conventional technologies will days to read the data forget about processing it.
  • 7. WHAT IS COLUMNS ORIENTED MEANS • Grouped by columns, • The reason to store values on a per-column basis instead is based on the assumption • that, for specific queries, not all of the values are needed. • Reduced I/O
  • 9. HMASTER • Master server is responsible for monitoring all RegionServer instances in the cluster, and is the interface for all metadata changes, it runs on the server which hosts namenode. • Master controls critical functions such as RegionServer failover and completing region splits. So while the cluster can still run for a time without the Master, the Master should be restarted as soon as
  • 10. ZOOKEEP ER • Zookeeper is an open source software providing a highly reliable, distributed coordination service • Entry point for an HBase system • It includes tracking of region servers, where the root region is hosted
  • 11. API • Interface to HBase • Using these we can we can access HBase and perform read/write and other operation on HBase. • REST, Thrift, and Avro • Thrift API framework, for scalable cross-language services development, combines a software stack with a code generation engine to build services that work efficiently and seamlessly between C++, Java, Python, PHP, Ruby, Erlang, Perl, Haskell, C#, Cocoa, JavaScript, Node.js, Smalltalk, OCaml and Delphi and other languages.
  • 12. • lib • commons-configuration-1.8.jar • commons-lang-2.6.jar • commons-logging-1.1.1.jar • hadoop-core-1.0.0.jar • hbase-0.92.1.jar • log4j-1.2.16.jar • slf4j-api-1.5.8.jar • slf4j-log4j12-1.5.8.jar • zookeeper-3.4.3.jar
  • 13. • import org.apache.hadoop.conf.Configuration; • import org.apache.hadoop.hbase.HBaseConfiguration; • import org.apache.hadoop.hbase.HColumnDescriptor; • import org.apache.hadoop.hbase.HTableDescriptor; • import org.apache.hadoop.hbase.KeyValue; • import org.apache.hadoop.hbase.MasterNotRunningException; • import org.apache.hadoop.hbase.ZooKeeperConnectionException; • import org.apache.hadoop.hbase.client.Delete; • import org.apache.hadoop.hbase.client.Get; • import org.apache.hadoop.hbase.client.HBaseAdmin; • import org.apache.hadoop.hbase.client.HTable; • import org.apache.hadoop.hbase.client.Result; • import org.apache.hadoop.hbase.client.ResultScanner; • import org.apache.hadoop.hbase.client.Scan; • import org.apache.hadoop.hbase.client.Put; • import org.apache.hadoop.hbase.util.Bytes;
  • 14. • Configuration hConf = HBaseConfiguration.create(conf); hConf.set(Constants.HBASE_CONFIGURATION_ZOOKEEPER_QUO RUM, hbaseZookeeperQuorum); hConf.set(Constants.HBASE_CONFIGURATION_ZOOKEEPER_CLIE NTPORT, hbaseZookeeperClientPort); HTable hTable = new HTable(hConf, tableName);
  • 15. • public class HBaseTest { • private static Configuration conf = null; • /** • * Initialization • */ • static { • conf = HBaseConfiguration.create(); • } • }
  • 16. • /** Create a table • */ • public static void creatTable(String tableName, String[] familys) • throws Exception { HBaseAdmin admin = new HBaseAdmin(conf); • if (admin.tableExists(tableName)) { System.out.println("table already exists!"); • } else { HTableDescriptor tableDesc = new HTableDescriptor(tableName); • for (int i = 0; i < familys.length; i++) { • tableDesc.addFamily(new HColumnDescriptor(familys[i])); • }admin.createTable(tableDesc); • System.out.println("create table " + tableName + " ok."); • }
  • 17. • /** * Delete a table • */ • public static void deleteTable(String tableName) throws Exception { • try {HBaseAdmin admin = new HBaseAdmin(conf); • admin.disableTable(tableName); • admin.deleteTable(tableName); • System.out.println("delete table " + tableName + " ok."); • } catch (MasterNotRunningException e) { • e.printStackTrace(); • } catch (ZooKeeperConnectionException e) { • e.printStackTrace(); • } • }
  • 18. • /** • * Put (or insert) a row • */ • public static void addRecord(String tableName, String rowKey, String family, String qualifier, String value) throws Exception { • try {HTable table = new HTable(conf, tableName); • Put put = new Put(Bytes.toBytes(rowKey)); • put.add(Bytes.toBytes(family), Bytes.toBytes(qualifier), Bytes • .toBytes(value)); • table.put(put); • System.out.println("insert recored " + rowKey + " to table " • + tableName + " ok."); • } catch (IOException e) {e.printStackTrace();
  • 19. • /** • * Delete a row • */ • public static void delRecord(String tableName, String rowKey) • throws IOException {HTable table = new HTable(conf, tableName); • List<Delete> list = new ArrayList<Delete>(); • Delete del = new Delete(rowKey.getBytes()); • list.add(del); • table.delete(list); • System.out.println("del recored " + rowKey + " ok."); • }
  • 20. • /**Get a row • */ • public static void getOneRecord (String tableName, String rowKey) throws IOException{ • HTable table = new HTable(conf, tableName); • Get get = new Get(rowKey.getBytes()); • Result rs = table.get(get); • for(KeyValue kv : rs.raw()){ • System.out.print(new String(kv.getRow()) + " " ); • System.out.print(new String(kv.getFamily()) + ":" ); • System.out.print(new String(kv.getQualifier()) + " " ); • System.out.print(kv.getTimestamp() + " " ); • System.out.println(new String(kv.getValue())); • } • }
  • 21. • /** Scan (or list) a table */ • public static void getAllRecord (String tableName) { • try{ • HTable table = new HTable(conf, tableName); • Scan s = new Scan(); • ResultScanner ss = table.getScanner(s); • for(Result r:ss){ • for(KeyValue kv : r.raw()){ • System.out.print(new String(kv.getRow()) + " "); • System.out.print(new String(kv.getFamily()) + ":"); System.out.print(new String(kv.getQualifier()) + " "); • System.out.print(kv.getTimestamp() + " "); System.out.println(new String(kv.getValue())); } } • } catch (IOException e){ • e.printStackTrace(); • } • }
  • 22. • public static void main(String[] agrs) { • try { • String tablename = "scores"; • String[] familys = { "grade", "course" }; • HBaseTest.creatTable(tablename, familys); • // add record zkb • HBaseTest.addRecord(tablename, "zkb", "grade", "", "5"); • HBaseTest.addRecord(tablename, "zkb", "course", "", "90"); • HBaseTest.addRecord(tablename, "zkb", "course", "math", "97"); • HBaseTest.addRecord(tablename, "zkb", "course", "art", "87"); • // add record baoniu • HBaseTest.addRecord(tablename, "baoniu", "grade", "", "4"); • HBaseTest.addRecord(tablename, "baoniu", "course", "math", "89"); •
  • 23. • System.out.println("===========get one record========"); • HBaseTest.getOneRecord(tablename, "zkb"); • System.out.println("===========show all record========"); • HBaseTest.getAllRecord(tablename); • System.out.println("===========del one record========"); • HBaseTest.delRecord(tablename, "baoniu"); • HBaseTest.getAllRecord(tablename); • System.out.println("===========show all record========"); • HBaseTest.getAllRecord(tablename); • } catch (Exception e) { • e.printStackTrace(); • } • }}