
I am following the RecorderPlay example in the simple-openni library for Processing:

http://code.google.com/p/simple-openni/

It records to an ONI file, a video format that retains both depth and RGB information. Currently, though, it captures the entire image, and I want to be able to capture only a certain z-depth range. Does anyone know if this is possible?
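
The recording setup in that example looks roughly like this (trimmed down to just the depth stream, same test.oni filename as the example):

import SimpleOpenNI.*;

SimpleOpenNI context;

void setup(){
  context = new SimpleOpenNI(this);
  context.enableDepth();
  // record the depth stream into an .oni file
  context.enableRecorder(SimpleOpenNI.RECORD_MEDIUM_FILE,"test.oni");
  context.addNodeToRecording(SimpleOpenNI.NODE_DEPTH,SimpleOpenNI.CODEC_16Z_EMB_TABLES);
  size(context.depthWidth(), context.depthHeight());
}

void draw(){
  context.update();
  image(context.depthImage(), 0, 0);
}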

mheavers

1 Answer


I don't think you can configure what depth range to record, but what you could do is process the information you read from the .oni recording:

import SimpleOpenNI.*;

SimpleOpenNI  context;
boolean       recordFlag = true; // true = record to test.oni, false = play it back and export .ply frames
boolean       saving = false;
int frames = 0;
int savedFrames = 0;
//change these two values as you wish:
float minZ = 100;
float maxZ = 500;

void setup(){
  context = new SimpleOpenNI(this);

  if(! recordFlag){
    if(! context.openFileRecording("test.oni") ){
      println("can't find recording !!!!");
      exit();
    }
    context.enableDepth();
  }else{  
    // recording
    context.enableDepth();
    // setup the recording 
    context.enableRecorder(SimpleOpenNI.RECORD_MEDIUM_FILE,"test.oni");
    // select the recording channels
    context.addNodeToRecording(SimpleOpenNI.NODE_DEPTH,SimpleOpenNI.CODEC_16Z_EMB_TABLES);
  }
  // set window size 
  if((context.nodes() & SimpleOpenNI.NODE_DEPTH) != 0)
    size(context.depthWidth() , context.depthHeight());
  else 
    exit();
}
void draw()
{
  background(0);
  context.update();
  if((context.nodes() & SimpleOpenNI.NODE_DEPTH) != 0) image(context.depthImage(),0,0);
  if(recordFlag) frames++;
  if(saving && savedFrames < frames){
      delay(3000);// hack: give the .oni playback time to move on to the next frame
      int i = savedFrames;
      PVector[] realWorldMap = context.depthMapRealWorld();
      // first pass: count the points inside the depth range so the
      // PLY header declares the correct vertex count
      int numPoints = 0;
      for(int p = 0; p < realWorldMap.length; p++){
        if(realWorldMap[p].z > minZ && realWorldMap[p].z < maxZ) numPoints++;
      }
      PrintWriter output = createWriter(dataPath("frame_"+i+".ply"));
      output.println("ply");
      output.println("format ascii 1.0");
      output.println("element vertex " + numPoints);
      output.println("property float x");
      output.println("property float y");
      output.println("property float z");
      output.println("end_header");
      // second pass: write only the points inside the depth range
      for(int p = 0; p < realWorldMap.length; p++){
        PVector realWorldPoint = realWorldMap[p];
        if(realWorldPoint.z > minZ && realWorldPoint.z < maxZ)
          output.println(realWorldPoint.x + " " + realWorldPoint.y + " " + realWorldPoint.z);
      }
      output.flush();
      output.close();
      rect(random(width),random(height),100,100); // visual feedback that a frame was written
      println("saved " + (i+1) + " of " + frames);
      savedFrames++;
  }
}
void keyPressed(){
  if(key == ' '){
    if(recordFlag){
      // store how many frames were recorded so playback knows how many to export
      saveStrings(dataPath("frames.txt"),split(frames+" ",' '));
      exit();
    }else saveONIToPLY();
  }
}
void saveONIToPLY(){
  frames = int(loadStrings(dataPath("frames.txt"))[0]);
  saving = true;
  println("recording " + frames + " frames");
}
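
If it helps with picking the minZ/maxZ values, here's a minimal playback-only sketch (a stripped-down variant of the code above, reading the same test.oni file) that simply highlights the pixels whose real-world z falls inside the range:

import SimpleOpenNI.*;

SimpleOpenNI context;
float minZ = 100;
float maxZ = 500;

void setup(){
  context = new SimpleOpenNI(this);
  if(! context.openFileRecording("test.oni") ){
    println("can't find recording !!!!");
    exit();
  }
  context.enableDepth();
  size(context.depthWidth(), context.depthHeight());
}

void draw(){
  background(0);
  context.update();
  stroke(255);
  PVector[] realWorldMap = context.depthMapRealWorld();
  int w = context.depthWidth();
  int h = context.depthHeight();
  // draw only the pixels whose real-world depth is inside [minZ, maxZ]
  for(int y = 0; y < h; y++){
    for(int x = 0; x < w; x++){
      PVector p = realWorldMap[x + y * w];
      if(p.z > minZ && p.z < maxZ) point(x, y);
    }
  }
}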
George Profenza
  • I think you're right. I found an openFrameworks code library that allows me to take an ONI file and export it at a certain z depth, but this code is great for visualizing it. Thanks! – mheavers Jul 24 '12 at 16:18
  • Cool, it's great you can use openFrameworks, the speed is so much better for Kinect stuff. For more advanced point cloud processing you can also have a look at the [Point Cloud Library](http://pointclouds.org/), but bear in mind it's not super straightforward to set up (as it has a lot of dependencies). Your project sounds interesting, would be cool to see something online at some point :) Good luck! – George Profenza Jul 24 '12 at 16:24
  • Sure thing, I'll send you some stuff I'm working on as soon as it's finished. – mheavers Jul 25 '12 at 00:05