/**
 * Returns an input stream positioned at the start of this large object.
 * A private copy of the underlying {@code LargeObject} is used so that the
 * stream's read position is independent of this object's own position; the
 * copy is tracked in {@code subLOs} so it can be released later.
 *
 * @return a stream over the full contents of the large object
 * @throws SQLException if this object has been freed or positioning fails
 */
public synchronized InputStream getBinaryStream() throws SQLException {
    checkFreed();
    final LargeObject streamLo = lo.copy();
    subLOs.add(streamLo);
    // Rewind the copy: it inherits the source object's current position.
    streamLo.seek(0, LargeObject.SEEK_SET);
    return streamLo.getInputStream();
}
/**
 * Returns an input stream positioned at the start of this large object.
 * Works on a tracked copy of the underlying {@code LargeObject} so the
 * returned stream does not disturb this object's own read position.
 *
 * @return a stream over the full contents of the large object
 * @throws SQLException if this object has been freed or positioning fails
 */
public synchronized InputStream getBinaryStream() throws SQLException {
    checkFreed();
    final LargeObject copy = getLo(false).copy();
    addSubLO(copy);
    // The copy starts at the source's current offset; rewind it explicitly.
    copy.seek(0, LargeObject.SEEK_SET);
    return copy.getInputStream();
}
/**
 * Returns a stream over a sub-range of this large object, following the
 * JDBC {@code Blob.getBinaryStream(long, long)} contract: {@code pos} is a
 * 1-based byte offset and {@code length} is the number of bytes exposed.
 *
 * @param pos    1-based offset of the first byte to read; must be {@code >= 1}
 * @param length number of bytes the returned stream may produce; must be {@code >= 0}
 * @return an input stream limited to {@code length} bytes starting at {@code pos}
 * @throws SQLException if this object has been freed, an argument is out of
 *         range, or positioning fails
 */
public synchronized java.io.InputStream getBinaryStream(long pos, long length) throws SQLException {
    checkFreed();
    // JDBC mandates pos >= 1 and length >= 0; the previous code forwarded a
    // negative seek offset for pos < 1 instead of failing fast.
    if (pos < 1) {
        throw new SQLException("pos must be >= 1, was " + pos);
    }
    if (length < 0) {
        throw new SQLException("length must be >= 0, was " + length);
    }
    LargeObject subLO = getLo(false).copy();
    addSubLO(subLO);
    if (pos > Integer.MAX_VALUE) {
        // Offsets past the 32-bit range need the 64-bit seek API.
        subLO.seek64(pos - 1, LargeObject.SEEK_SET);
    } else {
        subLO.seek((int) pos - 1, LargeObject.SEEK_SET);
    }
    return subLO.getInputStream(length);
}
public InputStream getBinaryStream(int columnIndex) throws SQLException { checkResultSet( columnIndex ); if (wasNullFlag) return null; if (connection.haveMinimumCompatibleVersion("7.2")) { //Version 7.2 supports BinaryStream for all PG bytea type //As the spec/javadoc for this method indicate this is to be used for //large binary values (i.e. LONGVARBINARY) PG doesn't have a separate //long binary datatype, but with toast the bytea datatype is capable of //handling very large values. Thus the implementation ends up calling //getBytes() since there is no current way to stream the value from the server byte b[] = getBytes(columnIndex); if (b != null) return new ByteArrayInputStream(b); } else { // In 7.1 Handle as BLOBS so return the LargeObject input stream if ( fields[columnIndex - 1].getOID() == Oid.OID) { LargeObjectManager lom = connection.getLargeObjectAPI(); LargeObject lob = lom.open(getLong(columnIndex)); return lob.getInputStream(); } } return null; }
/**
 * Returns an input stream over the whole large object, starting at byte 0.
 * The stream reads from a dedicated copy of the large object (registered in
 * {@code subLOs}) so that concurrent use of this object is not affected.
 *
 * @return a stream over the full contents of the large object
 * @throws SQLException if this object has been freed or positioning fails
 */
public synchronized InputStream getBinaryStream() throws SQLException {
    checkFreed();
    final LargeObject readerLo = lo.copy();
    subLOs.add(readerLo);
    // Ensure the copy reads from the beginning regardless of lo's position.
    readerLo.seek(0, LargeObject.SEEK_SET);
    return readerLo.getInputStream();
}
/**
 * Opens a fresh stream over this large object's full contents.
 * A tracked copy of the underlying object is seeked back to offset 0 and
 * handed to the caller, keeping this object's own position untouched.
 *
 * @return a stream over the full contents of the large object
 * @throws SQLException if this object has been freed or positioning fails
 */
public synchronized InputStream getBinaryStream() throws SQLException {
    checkFreed();
    final LargeObject dup = lo.copy();
    subLOs.add(dup);
    dup.seek(0, LargeObject.SEEK_SET); // rewind the copy before handing it out
    return dup.getInputStream();
}
/**
 * Opens the PostgreSQL large object referenced by the given column and
 * returns a stream over its contents.
 *
 * @param store  the store whose connection is used for the LO read
 * @param rs     result set positioned on the row holding the LO oid
 * @param column index of the column containing the large-object oid
 * @return a stream over the large object, or {@code null} when the column
 *         holds -1 (presumably the store's sentinel for "no LOB" — confirm)
 * @throws SQLException if the connection or large-object access fails
 */
public InputStream getLOBStream(JDBCStore store, ResultSet rs, int column) throws SQLException {
    DelegatingConnection conn = (DelegatingConnection) store.getConnection();
    // Large-object access requires an open transaction.
    conn.setAutoCommit(false);
    LargeObjectManager lom = getLargeObjectManager(conn);
    // Fetch the oid once; the original read the column twice.
    int oid = rs.getInt(column);
    if (oid == -1) {
        return null;
    }
    LargeObject lo = lom.open(oid);
    return lo.getInputStream();
}
/**
 * Opens the PostgreSQL large object referenced by the given column and
 * returns a stream over its contents.
 *
 * @param store  the store whose connection is used for the LO read
 * @param rs     result set positioned on the row holding the LO oid
 * @param column index of the column containing the large-object oid
 * @return a stream over the large object, or {@code null} when the column
 *         holds -1 (presumably the store's sentinel for "no LOB" — confirm)
 * @throws SQLException if the connection or large-object access fails
 */
public InputStream getLOBStream(JDBCStore store, ResultSet rs, int column) throws SQLException {
    DelegatingConnection conn = (DelegatingConnection) store.getConnection();
    // Large-object access requires an open transaction.
    conn.setAutoCommit(false);
    LargeObjectManager lom = getLargeObjectManager(conn);
    // Fetch the oid once; the original read the column twice.
    int oid = rs.getInt(column);
    if (oid == -1) {
        return null;
    }
    LargeObject lo = lom.open(oid);
    return lo.getInputStream();
}
/**
 * Opens the PostgreSQL large object referenced by the given column and
 * returns a stream over its contents.
 *
 * @param store  the store whose connection is used for the LO read
 * @param rs     result set positioned on the row holding the LO oid
 * @param column index of the column containing the large-object oid
 * @return a stream over the large object, or {@code null} when the column
 *         holds -1 (presumably the store's sentinel for "no LOB" — confirm)
 * @throws SQLException if the connection or large-object access fails
 */
public InputStream getLOBStream(JDBCStore store, ResultSet rs, int column) throws SQLException {
    DelegatingConnection conn = (DelegatingConnection) store.getConnection();
    // Large-object access requires an open transaction.
    conn.setAutoCommit(false);
    LargeObjectManager lom = getLargeObjectManager(conn);
    // Fetch the oid once; the original read the column twice.
    int oid = rs.getInt(column);
    if (oid == -1) {
        return null;
    }
    LargeObject lo = lom.open(oid);
    return lo.getInputStream();
}
/**
 * Opens the PostgreSQL large object referenced by the given column and
 * returns a stream over its contents.
 *
 * @param store  the store whose connection is used for the LO read
 * @param rs     result set positioned on the row holding the LO oid
 * @param column index of the column containing the large-object oid
 * @return a stream over the large object, or {@code null} when the column
 *         holds -1 (presumably the store's sentinel for "no LOB" — confirm)
 * @throws SQLException if the connection or large-object access fails
 */
public InputStream getLOBStream(JDBCStore store, ResultSet rs, int column) throws SQLException {
    DelegatingConnection conn = (DelegatingConnection) store.getConnection();
    // Large-object access requires an open transaction.
    conn.setAutoCommit(false);
    LargeObjectManager lom = getLargeObjectManager(conn);
    // Fetch the oid once; the original read the column twice.
    int oid = rs.getInt(column);
    if (oid == -1) {
        return null;
    }
    LargeObject lo = lom.open(oid);
    return lo.getInputStream();
}
/**
 * Opens the PostgreSQL large object referenced by the given column and
 * returns a stream over its contents. The LO API is obtained by unwrapping
 * the pooled connection down to the native {@code PGConnection}.
 *
 * @param store  the store whose connection is used for the LO read
 * @param rs     result set positioned on the row holding the LO oid
 * @param column index of the column containing the large-object oid
 * @return a stream over the large object, or {@code null} when the column
 *         holds -1 (presumably the store's sentinel for "no LOB" — confirm)
 * @throws SQLException if the connection or large-object access fails
 */
public InputStream getLOBStream(JDBCStore store, ResultSet rs, int column) throws SQLException {
    DelegatingConnection conn = (DelegatingConnection) store.getConnection();
    // Large-object access requires an open transaction.
    conn.setAutoCommit(false);
    LargeObjectManager lom =
        ((PGConnection) conn.getInnermostDelegate()).getLargeObjectAPI();
    // Fetch the oid once; the original read the column twice.
    int oid = rs.getInt(column);
    if (oid == -1) {
        return null;
    }
    LargeObject lo = lom.open(oid);
    return lo.getInputStream();
}
/**
 * Reads the large object stored for {@code path} from the filestore table.
 * Postgres does not allow reading a large object after the owning connection
 * has been closed, so the JDBI handle stays open and is closed by the
 * returned {@code HandleCloserInputStream} (or here, on miss/failure).
 *
 * <p>NOTE(review): a RuntimeException thrown between {@code dbi.open()} and
 * the return would leak the handle — only {@code SQLException} is caught.</p>
 *
 * @param path key of the filestore row to read
 * @return a stream over the object's bytes, or {@code null} when no row matches
 */
private InputStream doReadPostgres(String path) {
    Handle handle = dbi.open();
    try {
        handle.getConnection().setAutoCommit(false);
        List<Map<String, Object>> rows =
            handle.select("SELECT data FROM filestore WHERE path=?", path);
        Optional<Long> oid = rows.stream()
            .map(row -> row.get("data"))
            .map(Long.class::cast)
            .findFirst();
        if (!oid.isPresent()) {
            handle.close();
            return null;
        }
        LargeObjectManager manager =
            getPostgresConnection(handle.getConnection()).getLargeObjectAPI();
        LargeObject lob = manager.open(oid.get(), LargeObjectManager.READ);
        // Ownership of the handle transfers to the wrapper stream.
        return new HandleCloserInputStream(handle, lob.getInputStream());
    } catch (SQLException e) {
        IOUtils.closeQuietly(handle);
        throw ExtensionDataAccessException.launderThrowable(e);
    }
}
/**
 * Reads the large object stored for {@code path} from the filestore table.
 * Postgres does not allow reading a large object after the owning connection
 * has been closed, so the JDBI handle stays open and is closed by the
 * returned {@code HandleCloserInputStream} (or here, on miss/failure).
 *
 * <p>NOTE(review): a RuntimeException thrown between {@code dbi.open()} and
 * the return would leak the handle — only {@code SQLException} is caught.</p>
 *
 * @param path key of the filestore row to read
 * @return a stream over the object's bytes, or {@code null} when no row matches
 */
private InputStream doReadPostgres(String path) {
    Handle handle = dbi.open();
    try {
        handle.getConnection().setAutoCommit(false);
        List<Map<String, Object>> rows =
            handle.select("SELECT data FROM filestore WHERE path=?", path);
        Optional<Long> oid = rows.stream()
            .map(row -> row.get("data"))
            .map(Long.class::cast)
            .findFirst();
        if (!oid.isPresent()) {
            handle.close();
            return null;
        }
        LargeObjectManager manager =
            getPostgresConnection(handle.getConnection()).getLargeObjectAPI();
        LargeObject lob = manager.open(oid.get(), LargeObjectManager.READ);
        // Ownership of the handle transfers to the wrapper stream.
        return new HandleCloserInputStream(handle, lob.getInputStream());
    } catch (SQLException e) {
        IOUtils.closeQuietly(handle);
        throw DaoException.launderThrowable(e);
    }
}
/**
 * Reads the large object stored for {@code path} from the filestore table.
 * Postgres does not allow reading a large object after the owning connection
 * has been closed, so the JDBI handle stays open and is closed by the
 * returned {@code HandleCloserInputStream} (or here, on miss/failure).
 *
 * <p>NOTE(review): a RuntimeException thrown between {@code dbi.open()} and
 * the return would leak the handle — only {@code SQLException} is caught.</p>
 *
 * @param path key of the filestore row to read
 * @return a stream over the object's bytes, or {@code null} when no row matches
 */
private InputStream doReadPostgres(String path) {
    Handle handle = dbi.open();
    try {
        handle.getConnection().setAutoCommit(false);
        List<Map<String, Object>> rows =
            handle.select("SELECT data FROM filestore WHERE path=?", path);
        Optional<Long> oid = rows.stream()
            .map(row -> row.get("data"))
            .map(Long.class::cast)
            .findFirst();
        if (!oid.isPresent()) {
            handle.close();
            return null;
        }
        LargeObjectManager manager =
            getPostgresConnection(handle.getConnection()).getLargeObjectAPI();
        LargeObject lob = manager.open(oid.get(), LargeObjectManager.READ);
        // Ownership of the handle transfers to the wrapper stream.
        return new HandleCloserInputStream(handle, lob.getInputStream());
    } catch (SQLException e) {
        IOUtils.closeQuietly(handle);
        throw DaoException.launderThrowable(e);
    }
}
/**
 * Reads the large object stored for {@code id} from the icon filestore table.
 * Postgres does not allow reading a large object after the owning connection
 * has been closed, so the JDBI handle stays open and is closed by the
 * returned {@code HandleCloserInputStream} (or here, on miss/failure).
 *
 * <p>NOTE(review): a RuntimeException thrown between {@code dbi.open()} and
 * the return would leak the handle — only {@code SQLException} is caught.</p>
 *
 * @param id key of the icon row to read
 * @return a stream over the object's bytes, or {@code null} when no row matches
 */
private InputStream doReadPostgres(String id) {
    Handle handle = dbi.open();
    try {
        handle.getConnection().setAutoCommit(false);
        List<Map<String, Object>> rows =
            handle.select("SELECT data FROM icon_filestore WHERE id=?", id);
        Optional<Long> oid = rows.stream()
            .map(row -> row.get("data"))
            .map(Long.class::cast)
            .findFirst();
        if (!oid.isPresent()) {
            handle.close();
            return null;
        }
        LargeObjectManager manager =
            getPostgresConnection(handle.getConnection()).getLargeObjectAPI();
        LargeObject lob = manager.open(oid.get(), LargeObjectManager.READ);
        // Ownership of the handle transfers to the wrapper stream.
        return new HandleCloserInputStream(handle, lob.getInputStream());
    } catch (SQLException e) {
        IOUtils.closeQuietly(handle);
        throw IconDataAccessException.launderThrowable(e);
    }
}
public InputStream getBinaryStream(int columnIndex) throws SQLException { checkResultSet( columnIndex ); if (wasNullFlag) return null; if (connection.haveMinimumCompatibleVersion("7.2")) { //Version 7.2 supports BinaryStream for all PG bytea type //As the spec/javadoc for this method indicate this is to be used for //large binary values (i.e. LONGVARBINARY) PG doesn't have a separate //long binary datatype, but with toast the bytea datatype is capable of //handling very large values. Thus the implementation ends up calling //getBytes() since there is no current way to stream the value from the server byte b[] = getBytes(columnIndex); if (b != null) return new ByteArrayInputStream(b); } else { // In 7.1 Handle as BLOBS so return the LargeObject input stream if ( fields[columnIndex - 1].getOID() == Oid.OID) { LargeObjectManager lom = connection.getLargeObjectAPI(); LargeObject lob = lom.open(getLong(columnIndex)); return lob.getInputStream(); } } return null; }
public InputStream getBinaryStream(int columnIndex) throws SQLException { checkResultSet( columnIndex ); if (wasNullFlag) return null; if (connection.haveMinimumCompatibleVersion("7.2")) { //Version 7.2 supports BinaryStream for all PG bytea type //As the spec/javadoc for this method indicate this is to be used for //large binary values (i.e. LONGVARBINARY) PG doesn't have a separate //long binary datatype, but with toast the bytea datatype is capable of //handling very large values. Thus the implementation ends up calling //getBytes() since there is no current way to stream the value from the server byte b[] = getBytes(columnIndex); if (b != null) return new ByteArrayInputStream(b); } else { // In 7.1 Handle as BLOBS so return the LargeObject input stream if ( fields[columnIndex - 1].getOID() == Oid.OID) { LargeObjectManager lom = connection.getLargeObjectAPI(); LargeObject lob = lom.open(getLong(columnIndex)); return lob.getInputStream(); } } return null; }