
I have a map(Object key, Text value, Context context) that writes a TupleWritable to the context with context.write(). In reduce(Text key, Iterable<TupleWritable> values, Context context) I read the TupleWritable back, but it's empty. My code is below. This confuses me; any help will be appreciated.

package boc.competition.team1;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.join.TupleWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.MultipleInputs;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class App 
{
    public static class SCSTransMap extends Mapper<Object, Text, Text, TupleWritable> {
        private Text name = new Text();

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            IntWritable i = new IntWritable(1);
            TupleWritable result = new TupleWritable(new IntWritable[] { i, new IntWritable(3) });
            System.out.println(result.get(0) + "=====" + result.get(1));
            // ------ here this prints the right value: 1=====3
            context.write(name, result);
        }
    }
    public static class reducer extends Reducer<Text, TupleWritable, Text, Text> {
        @Override
        public void reduce(Text key, Iterable<TupleWritable> values, Context context) throws IOException, InterruptedException {
            for (TupleWritable tuple : values) {
                System.out.println(tuple.get(0) + "=====" + tuple.get(1));
                // and here it prints 0=====0
            }
        }
    }

    public static void main( String[] args ) throws Exception
    {
        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf,"team1Job");
        job.setJarByClass(App.class);
        job.setReducerClass(reducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(TupleWritable.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        MultipleInputs.addInputPath(job, new Path("C:\\Program Files\\PuTTY\\data\\scs\\Scs_Journal.csv"), TextInputFormat.class,SCSTransMap.class);
        FileOutputFormat.setOutputPath(job, new Path(OUT_PATH));

        System.exit(job.waitForCompletion(true)?0:1);
    }
}
2 Answers


I used a user-defined Writable class instead of the TupleWritable class to pass the values from map to reduce. Here is the user-defined Writable:

package boc.competition.team1;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;

public class IntPairWritable implements Writable {
    private IntWritable value1;
    private IntWritable value2;

    // Hadoop instantiates Writables reflectively, so a no-arg constructor
    // that initializes both fields is required.
    public IntPairWritable() {
        value1 = new IntWritable();
        value2 = new IntWritable();
    }

    public IntPairWritable(int value1, int value2) {
        this.value1 = new IntWritable(value1);
        this.value2 = new IntWritable(value2);
    }

    public int getInt1() {
        return value1.get();
    }

    public int getInt2() {
        return value2.get();
    }

    @Override
    public String toString() {
        return value1.toString() + " " + value2.toString();
    }

    // Deserialize both fields in the same order they were written.
    @Override
    public void readFields(DataInput in) throws IOException {
        value1.readFields(in);
        value2.readFields(in);
    }

    // Serialize both fields unconditionally, unlike TupleWritable.
    @Override
    public void write(DataOutput out) throws IOException {
        value1.write(out);
        value2.write(out);
    }
}
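
For reference, here is a minimal sketch (untested, reusing the names from the question) of how the mapper, reducer, and job setup change to carry this class instead of TupleWritable:

public static class SCSTransMap extends Mapper<Object, Text, Text, IntPairWritable> {
    private Text name = new Text();

    @Override
    public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        // same two values as in the question, now carried by the custom Writable
        context.write(name, new IntPairWritable(1, 3));
    }
}

public static class reducer extends Reducer<Text, IntPairWritable, Text, Text> {
    @Override
    public void reduce(Text key, Iterable<IntPairWritable> values, Context context) throws IOException, InterruptedException {
        for (IntPairWritable pair : values) {
            // should now print 1=====3 instead of 0=====0
            System.out.println(pair.getInt1() + "=====" + pair.getInt2());
        }
    }
}

and in main():

    job.setMapOutputValueClass(IntPairWritable.class);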
wangguanguo
  • 181
  • 4
0

According to the TupleWritable.java source file:

    This is *not* a general-purpose tuple type. In almost all cases, users are
    encouraged to implement their own serializable types, which can perform
    better validation and provide more efficient encodings than this class is
    capable. TupleWritable relies on the join framework for type safety and
    assumes its instances will rarely be persisted, assumptions not only
    incompatible with, but contrary to the general case.

Also see this answer from Chris Douglas:

You need access to TupleWritable::setWritten(int). If you want to use
TupleWritable outside the join package, then you need to make this
(and probably related methods, like clearWritten(int)) public and
recompile.

It seems safe to say that TupleWritable is not meant to be a publicly used class for MapReduce jobs.
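
Concretely, that is why the values arrive empty: TupleWritable keeps an internal "written" bitset, write() only serializes positions whose bit is set, and readFields() only deserializes those same positions; the bit is set by the package-private setWritten(int), which only the join framework calls. A rough sketch of what happens to the question's tuple (paraphrased from the Hadoop source; details may vary by version):

// mapper side: the objects are live, so get() returns the real values
TupleWritable result = new TupleWritable(
        new IntWritable[] { new IntWritable(1), new IntWritable(3) });
result.get(0); // 1 -- no serialization has happened yet

// shuffle: result.write(out) records the class names but skips both
// payloads, because neither position was ever marked via setWritten(int)

// reducer side: readFields(in) re-creates two IntWritables by reflection
// and, seeing no "written" bits, never fills them in -- hence 0=====0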
