
How to Use Hadoop to Extract Specified Content from Files

Author: 日京  Published: 2021-07-08 02:22:50

Tags: hadoop, extract, file content
Contents
  • I. Requirement

  • II. Steps

  • III. Result

I. Requirement

Given the txt file below, write the links that contain the string "baidu" to one file, and all other links to a separate file.

(Figure: screenshot of the sample input txt file.)
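The screenshot of the input file is not reproduced here. For this kind of exercise the input is typically a plain text file with one URL per line; the lines below are a hypothetical illustration, not the article's actual data:

http://www.baidu.com
http://www.google.com
http://cn.bing.com
http://www.sohu.com

With input like this, the first line would land in the "baidu" file and the remaining three in the other file.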

II. Steps

1. LogMapper.java


package com.whj.mapreduce.outputformat;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class LogMapper extends Mapper<LongWritable,Text,Text,NullWritable> {
   @Override
   protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
       // No processing: emit the whole input line as the key
       context.write(value, NullWritable.get());
   }
}
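Note that the mapper emits each input line unchanged as the key, with a NullWritable value. Because the line itself is the key, the shuffle phase sorts the lines and groups identical ones together before they reach the reducer.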

2. LogReducer.java


package com.whj.mapreduce.outputformat;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class LogReducer extends Reducer<Text,NullWritable,Text,NullWritable> {
   @Override
   protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
       // Write the key once per occurrence so duplicate lines survive
       for (NullWritable value : values) {
           context.write(key, NullWritable.get());
       }
   }
}
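The loop over values matters here: since identical lines share a single key after the shuffle, iterating writes the key once per original occurrence, so duplicate lines are not silently collapsed into one.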

3. LogOutputFormat.java


package com.whj.mapreduce.outputformat;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class LogOutputFormat extends FileOutputFormat<Text,NullWritable> {
   @Override
   public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
       // Hand the framework our custom RecordWriter, which does the actual routing
       return new LogRecordWriter(job);
   }
}
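The OutputFormat itself does almost nothing: the framework calls getRecordWriter once per reduce task, and all of the routing logic lives in the returned LogRecordWriter. The driver wires this class in via job.setOutputFormatClass(LogOutputFormat.class).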

4. LogRecordWriter.java


package com.whj.mapreduce.outputformat;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

import java.io.IOException;

public class LogRecordWriter extends RecordWriter<Text,NullWritable> {

   private FSDataOutputStream baiduOut;
   private FSDataOutputStream otherOut;

   public LogRecordWriter(TaskAttemptContext job) throws IOException {
       // Create the two output streams
       FileSystem fs = FileSystem.get(job.getConfiguration());
       baiduOut = fs.create(new Path("D:\\temp\\outputformat.log"));
       otherOut = fs.create(new Path("D:\\temp\\other.log"));
   }

   @Override
   public void write(Text key, NullWritable nullWritable) throws IOException, InterruptedException {
       // Route each line to one of the two files based on its content
       String log = key.toString();
       if (log.contains("baidu")) {
           baiduOut.writeBytes(log + "\n");
       } else {
           otherOut.writeBytes(log + "\n");
       }
   }

   @Override
   public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
       // Close both streams
       IOUtils.closeStream(baiduOut);
       IOUtils.closeStream(otherOut);
   }
}
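One thing to watch: the two output paths above are hardcoded absolute Windows paths, completely independent of the output directory the driver configures. If you would rather keep the files under the job's configured output directory, a minimal sketch of an alternative constructor looks like this (the file names baidu.log and other.log are illustrative, not from the original article):

// Alternative constructor (sketch): resolve the files against the job's output directory.
// Requires: import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public LogRecordWriter(TaskAttemptContext job) throws IOException {
    FileSystem fs = FileSystem.get(job.getConfiguration());
    Path outputDir = FileOutputFormat.getOutputPath(job); // directory set in the driver
    baiduOut = fs.create(new Path(outputDir, "baidu.log")); // illustrative name
    otherOut = fs.create(new Path(outputDir, "other.log")); // illustrative name
}

Also note that FSDataOutputStream.writeBytes() keeps only the low byte of each character, so it is only safe for ASCII content such as these URLs.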

5. LogDriver.java


package com.whj.mapreduce.outputformat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class LogDriver {
   public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
       Configuration conf = new Configuration();
       Job job = Job.getInstance(conf);
       job.setJarByClass(LogDriver.class);
       job.setMapperClass(LogMapper.class);
       job.setReducerClass(LogReducer.class);
       job.setMapOutputKeyClass(Text.class);
       job.setMapOutputValueClass(NullWritable.class);
       job.setOutputKeyClass(Text.class);
       job.setOutputValueClass(NullWritable.class);
       // Set the custom OutputFormat
       job.setOutputFormatClass(LogOutputFormat.class);
       FileInputFormat.setInputPaths(job, new Path("D:\\input"));
       // Even though we use a custom OutputFormat, it extends FileOutputFormat,
       // and FileOutputFormat has to write a _SUCCESS file, so an output
       // directory must still be specified here.
       FileOutputFormat.setOutputPath(job, new Path("D:\\temp\\logoutput"));
       boolean b = job.waitForCompletion(true);
       System.exit(b ? 0 : 1);
   }
}
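If the job completes successfully, lines containing "baidu" end up in D:\temp\outputformat.log, all other lines in D:\temp\other.log, and D:\temp\logoutput holds only the _SUCCESS marker. One caveat: running Hadoop locally on Windows, as the D:\ paths suggest, generally also requires HADOOP_HOME to point at a directory containing winutils.exe.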

III. Result

(Figure: screenshot of the job's output files.)

Source: https://blog.csdn.net/weixin_43719616/article/details/117230449
