Monday, May 6, 2013


SoftKinetic Iisu SDK.
How to get SCENE.VertexArray in close interaction mode.

My small wrapper for Iisu data streams:
 #include <iostream> // for std::cout / std::cerr; the iisu SDK headers are assumed to be included as well  
   
 class ImageData  
 {  
 public:  
      ImageData(SK::Device *device, const SK::String &path); // For DEPTH | CONFIDENCE | COLOR  
      ImageData(SK::Device *device, const SK::String &dataPath, int width, int height, float HFOV, float VFOV);
      // For UV | SceneLabelImage  
      ~ImageData();  
   
      void *data;  
   
      void update();  
   
      bool isEnabled() { return hImage.isAutoUpdated(); }  
      void setEnabled(bool enable);  
   
      int getWidth() const { return m_width; }  
      int getHeight() const { return m_height; }  
   
      float getHFOV() const { return m_HFOV; }  
      float getVFOV() const { return m_VFOV; }  
   
 private:  
      ImageData();  
      ImageData(const ImageData &);  
   
      SK::Device *m_device;  
      SK::String m_path;  
      SK::DataHandle<SK::Image> hImage;  
      int m_width;  
      int m_height;  
      float m_HFOV;  
      float m_VFOV;  
 };  
 
 ImageData::ImageData( SK::Device *device, const SK::String &path )  
      : data(NULL)  
      , m_device(device)  
      , m_path(path)  
 {  
      SK::ParameterHandle<int> hWidth = m_device->registerParameterHandle<int>(path + ".Width");  
      m_width = hWidth.get();  
      SK::ParameterHandle<int> hHeight = m_device->registerParameterHandle<int>(path + ".Height");  
      m_height = hHeight.get();  
      SK::ParameterHandle<float> hHFOV = m_device->registerParameterHandle<float>(path + ".HFOV");  
      m_HFOV = hHFOV.get();  
      SK::ParameterHandle<float> hVFOV = m_device->registerParameterHandle<float>(path + ".VFOV");  
      m_VFOV = hVFOV.get();  
   
      hImage = m_device->registerDataHandle<SK::Image>(path + ".Image", false);  
      std::cout << m_path << " data stream has been registered\n";  
 }  
   
 ImageData::ImageData( SK::Device *device, const SK::String &dataPath, int width, int height, float HFOV, float VFOV )  
      : data(NULL)  
      , m_device(device)  
      , m_path(dataPath)  
      , m_width(width)  
      , m_height(height)  
      , m_HFOV(HFOV)  
      , m_VFOV(VFOV)  
 {  
      hImage = m_device->registerDataHandle<SK::Image>(dataPath, false);  
      std::cout << m_path << " data stream has been registered\n";  
 }  
   
 ImageData::~ImageData()  
 {  
      m_device->unregisterDataHandle<SK::Image>(hImage);  
 }  
   
 void ImageData::update()  
 {  
      if (isEnabled()) {  
           const SK::Image &image = hImage.get();  
           data = image.getRAW();  
      }  
 }  
   
 void ImageData::setEnabled( bool enable )  
 {   
      hImage.setAutoUpdate(enable);  
      if (!enable) {  
           data = NULL;  
      }  
 }
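Before moving on, a quick usage sketch. The data path names below are my best guesses for a DS325 in close interaction mode, so verify the exact paths with the iisu Data Explorer on your setup:
 // Usage sketch: the data paths are assumptions, check them in the iisu Data Explorer  
 // (device is your initialized SK::Device*)  
 ImageData depthData(device, "SOURCE.CAMERA.DEPTH"); // registers .Image, .Width, .Height, .HFOV, .VFOV  
 ImageData colorData(device, "SOURCE.CAMERA.COLOR");  
 ImageData uvData(device, "SOURCE.CAMERA.REGISTRATION.UV.Image",  
                  depthData.getWidth(), depthData.getHeight(),  
                  depthData.getHFOV(), depthData.getVFOV());  
   
 // Enable auto-update so update() sees fresh data every frame  
 depthData.setEnabled(true);  
 colorData.setEnabled(true);  
 uvData.setEnabled(true);  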
Get vertices from depth:
 DepthCamera depthCamera;  
   
 // DepthCamera class has members:  
 ImageData depthData;  
 ImageData colorData;  
 ImageData uvData;  
 PointCloud pointCloud;  
   
 DepthCamera::DepthCamera()  
 {  
      // First of all, you need to set up the camera projection  
      // with depthData.getVFOV(),  
      // a range (near/far planes) from 15 cm to 1 m, the nominal operating range of the DS325  
      // (in my scene I use a centimetric setup: near plane = 15.0f and far plane = 100.0f),  
      // and, of course, aspect ratio = depthData.getWidth() / depthData.getHeight().  
      // A sketch of this setup follows right after this constructor.  
   
      SK::EventManager &evtMgr = m_device->getEventManager();  
      evtMgr.registerEventListener("DEVICE.DataFrame", *this, &DepthCamera::onDataFrame);  
 }  
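   
 // A sketch of that projection setup. setPerspective() is a hypothetical helper,  
 // substitute whatever your math library provides (e.g. QMatrix4x4::perspective in Qt),  
 // and check whether your SDK reports the FOV in degrees or radians before passing it on.  
 void DepthCamera::setupProjection()  
 {  
      float aspect = (float)depthData.getWidth() / depthData.getHeight();  
      m_projMatrix.setPerspective(depthData.getVFOV(), // vertical field of view  
                                  aspect,              // width / height  
                                  15.0f,               // near plane: 15 cm  
                                  100.0f);             // far plane: 1 m (centimetric units)  
 }  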
   
 void DepthCamera::onDataFrame( const SK::DataFrameEvent &e )  
 {  
      SK::Result resUpdate = m_device->updateFrame(true); // assumed iisu pattern: lock the new frame  
      if (resUpdate.failed()) {  
           std::cerr << "Failed updating data frame\n";  
      } else {  
           updateData(); // Update depth/color/uv  
             
           getVerticesFromDepth(); // Get normalized screen coordinates of all points  
           pointCloud.transform(getModelViewProjMatrix().inverted()); // Move from NDC to world-space  
           pointCloud.recalcAabb(); // Because the transformation is nonlinear  
      }  
      m_device->releaseFrame(); // assumed iisu pattern: release the frame when processing is done  
 }  
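   
 // PointCloud::transform isn't shown here, so a minimal sketch of what it has to do,  
 // assuming Matrix4 is indexed as m(row, col) like m_projMatrix below and Vector3  
 // exposes x/y/z members: multiply each vertex as a homogeneous point (w = 1)  
 // and divide by w. That divide is the nonlinearity that forces recalcAabb() above.  
 void PointCloud::transform( const Matrix4 &m )  
 {  
      for (size_t n = 0; n < m_vertices.size(); ++n) {  
           Vector3 &p = m_vertices[n].pos;  
           float x = m(0, 0) * p.x + m(0, 1) * p.y + m(0, 2) * p.z + m(0, 3);  
           float y = m(1, 0) * p.x + m(1, 1) * p.y + m(1, 2) * p.z + m(1, 3);  
           float z = m(2, 0) * p.x + m(2, 1) * p.y + m(2, 2) * p.z + m(2, 3);  
           float w = m(3, 0) * p.x + m(3, 1) * p.y + m(3, 2) * p.z + m(3, 3);  
           p = Vector3(x / w, y / w, z / w); // perspective divide  
      }  
 }  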
 void DepthCamera::getVerticesFromDepth()  
 {  
      if (depthData.isEnabled() && colorData.isEnabled() && uvData.isEnabled()) {  
           uint16_t *depthArray = (uint16_t *)depthData.data;  
           uint8_t *colorArray = (uint8_t *)colorData.data;  
           float *uvArray = (float *)uvData.data;  
           for (int i = 0; i < depthData.getWidth(); ++i) {  
                for (int j = 0; j < depthData.getHeight(); ++j) {  
                     PointCloud::Vertex vertex;  
                     uint16_t depth = depthArray[i + j * depthData.getWidth()];  
                       
                     // Get normalized screen coordinates (Xn, Yn, Zn)  
                     // Read this article if you have any questions   
                     float Ze = -0.1f * depth; // mm to cm and move to eye-space  
                     float Zn = depthFromEyeToNDC(Ze); // NDC - normalized device coordinates       
                     // Perform near/far clipping  
                     if (qAbs(Zn) > 1.0f)  
                          continue;       
                     float u = (0.5f + i) / depthData.getWidth(); // Center of the pixel has half pixel offset  
                     float v = (0.5f + j) / depthData.getHeight();  
                     float Xn = u * 2.0f - 1.0f;  
                     float Yn = 1.0f - v * 2.0f; // Flip Y  
                     vertex.pos = Vector3(Xn, Yn, Zn);  
                       
                      // Color (the color image is BGRA here, hence the channel swizzle below)  
                      int index = 2 * (i + j * depthData.getWidth());  
                      float colorU = uvArray[index];  
                      float colorV = uvArray[index + 1];  
                      // Skip pixels without a valid depth-to-color mapping  
                      // (otherwise xColor/yColor would index outside the color image)  
                      if (colorU < 0.0f || colorU >= 1.0f || colorV < 0.0f || colorV >= 1.0f)  
                           continue;  
                      int xColor = colorData.getWidth() * colorU;  
                      int yColor = colorData.getHeight() * colorV;  
                      int colorIndex = 4 * (xColor + yColor * colorData.getWidth());  
                      vertex.r = colorArray[colorIndex + 2];  
                      vertex.g = colorArray[colorIndex + 1];  
                      vertex.b = colorArray[colorIndex];  
                     vertex.a = 1;  
   
                     pointCloud.addVertex(vertex);  
                }  
           }  
      }  
 }  
 float Camera::depthFromEyeToNDC( float Ze ) const  
 {  
      // Read the article mentioned above if you have any questions,  
      // and I can explain it for you as well.  
        
      const float &A = m_projMatrix(2, 2);  
      const float &B = m_projMatrix(2, 3);  
      return (A * Ze + B) / -Ze;  
 } 
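A quick sanity check of that formula. For a standard OpenGL-style projection matrix (my assumption here), A = -(far + near) / (far - near) and B = -2 * far * near / (far - near). With the centimetric near/far planes from above (15 and 100), the near plane must map to Zn = -1 and the far plane to Zn = +1:
 #include <cstdio>  
   
 int main()  
 {  
      const float n = 15.0f, f = 100.0f;  
      const float A = -(f + n) / (f - n);      // m_projMatrix(2, 2)  
      const float B = -2.0f * f * n / (f - n); // m_projMatrix(2, 3)  
   
      // Eye-space Z is negative because the camera looks down -Z  
      for (float Ze = -15.0f; Ze >= -100.0f; Ze -= 42.5f) {  
           float Zn = (A * Ze + B) / -Ze;  
           printf("Ze = %7.2f cm -> Zn = %+.3f\n", Ze, Zn);  
      }  
      // Prints Zn = -1.000 at the near plane and Zn = +1.000 at the far plane  
      return 0;  
 }  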
That's all!