/**
 * Copy constructor that re-stamps an existing exception with a new timestamp,
 * carrying over the original schema and table names unchanged.
 *
 * @param e         the exception whose schema/table names are reused
 * @param timestamp the timestamp to associate with the new exception
 */
public TableNotFoundException(TableNotFoundException e, long timestamp) {
    this(e.getSchemaName(), e.getTableName(), timestamp);
}
/**
 * Always fails: this resolver intentionally resolves no tables, so every
 * lookup reports the requested table as missing.
 *
 * @param schemaName schema portion of the requested name (may be null)
 * @param tableName  table portion of the requested name
 * @throws TableNotFoundException always, for the requested schema/table pair
 */
@Override
public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
    throw new TableNotFoundException(schemaName, tableName);
}
assertTrue("Expected TableNotFoundException since drop table goes through first", e instanceof TableNotFoundException && fullTableName.equals(((TableNotFoundException) e).getTableName()));
return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp(), new HashMap<String, UDFParseNode>(1), isNamespaceMapped);
return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp(), new HashMap<String, UDFParseNode>(1), isNamespaceMapped);
/**
 * Resolves the schema/table pair against the map of known tables.
 *
 * @param schemaName schema portion of the name (may be null)
 * @param tableName  table portion of the name
 * @return the unique matching table reference
 * @throws TableNotFoundException  if no table is registered under the combined name
 * @throws AmbiguousTableException if more than one table matches
 */
@Override
public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
    String fullName = SchemaUtil.getTableName(schemaName, tableName);
    List<TableRef> candidates = tableMap.get(fullName);
    switch (candidates.size()) {
        case 0:
            // Nothing registered under the combined name.
            throw new TableNotFoundException(fullName);
        case 1:
            return candidates.get(0);
        default:
            // Multiple entries: the bare name is not unique.
            throw new AmbiguousTableException(tableName);
    }
}
} catch (org.apache.phoenix.schema.TableNotFoundException tnf) { logger.error("Table might be already be deleted via cascade. Schema: " + tnf.getSchemaName() + " Table: " + tnf.getTableName());
return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp(), new HashMap<String, UDFParseNode>(1), isNamespaceMapped);
/**
 * Looks up the cached table reference for the given key.
 *
 * @param key the table key to look up in the metadata cache
 * @return the cached reference, never null
 * @throws TableNotFoundException if no entry exists for the key
 */
@Override
public PTableRef getTableRef(PTableKey key) throws TableNotFoundException {
    final PTableRef tableRef = metaData.get(key);
    if (tableRef != null) {
        return tableRef;
    }
    throw new TableNotFoundException(key.getName());
}
/**
 * Builds a new exception from an existing one, replacing only the timestamp;
 * the schema and table names are taken from the source exception.
 *
 * @param e         source exception supplying schema/table names
 * @param timestamp timestamp for the new instance
 */
public TableNotFoundException(TableNotFoundException e, long timestamp) {
    this(e.getSchemaName(), e.getTableName(), timestamp);
}
// Exception-factory hook: builds a TableNotFoundException from the schema/table
// names carried in the SQLExceptionInfo. NOTE(review): the trailing "})," suggests
// this overrides a method of an anonymous class inside a larger enum/constant
// declaration not visible here -- confirm against the enclosing file.
@Override public SQLException newException(SQLExceptionInfo info) { return new TableNotFoundException(info.getSchemaName(), info.getTableName()); } }),
/**
 * Re-wraps an existing exception with a different timestamp while keeping
 * its schema and table names.
 *
 * @param e         exception to copy the schema/table names from
 * @param timestamp new timestamp value
 */
public TableNotFoundException(TableNotFoundException e, long timestamp) {
    this(e.getSchemaName(), e.getTableName(), timestamp);
}
/**
 * Returns the HBase descriptor for the given table.
 *
 * @param tableName full table name bytes (schema and table packed as var-chars)
 * @return the table's descriptor
 * @throws TableNotFoundException if HBase reports the table missing (directly
 *         or as the cause of the IOException)
 * @throws SQLException wrapping any other IOException from HBase
 */
@Override
public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
    Table htable = getTable(tableName);
    try {
        return htable.getDescriptor();
    } catch (IOException e) {
        if (e instanceof org.apache.hadoop.hbase.TableNotFoundException
                || e.getCause() instanceof org.apache.hadoop.hbase.TableNotFoundException) {
            // Split the packed name back into schema and table components.
            byte[][] schemaAndTableName = new byte[2][];
            SchemaUtil.getVarChars(tableName, schemaAndTableName);
            throw new TableNotFoundException(Bytes.toString(schemaAndTableName[0]),
                    Bytes.toString(schemaAndTableName[1]));
        }
        // Fix: wrap in SQLException (declared by this method, and consistent with
        // getTable(byte[])) rather than an undeclared RuntimeException that would
        // bypass callers' SQLException handling while still preserving the cause.
        throw new SQLException(e);
    } finally {
        // Best-effort close; descriptor has already been fetched or an exception thrown.
        Closeables.closeQuietly(htable);
    }
}
/**
 * Opens an HBase {@code Table} handle for the given full table name.
 *
 * @param tableName full table name bytes
 * @return the HBase table handle
 * @throws TableNotFoundException if HBase does not know the table
 * @throws SQLException wrapping any other I/O failure
 */
@Override
public Table getTable(byte[] tableName) throws SQLException {
    try {
        return HBaseFactoryProvider.getHTableFactory().getTable(tableName, connection, null);
    } catch (org.apache.hadoop.hbase.TableNotFoundException tnfe) {
        // Translate the HBase-level exception into Phoenix's own type,
        // splitting the full name into its schema and table parts.
        String schemaPart = SchemaUtil.getSchemaNameFromFullName(tableName);
        String tablePart = SchemaUtil.getTableNameFromFullName(tableName);
        throw new TableNotFoundException(schemaPart, tablePart);
    } catch (IOException ioe) {
        throw new SQLException(ioe);
    }
}
/**
 * Returns the single table this resolver wraps, verifying the requested name
 * against it when enough information is available.
 *
 * @param schemaName requested schema (may be null)
 * @param tableName  requested table (may be null, or actually a column family)
 * @return the resolver's sole table reference
 * @throws TableNotFoundException if both parts are given and neither the
 *         resolved name nor the alias matches the request
 */
@Override
public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
    TableRef ref = tableRefs.get(0);
    /*
     * The only case we can definitely verify is when both a schemaName and a
     * tableName are provided. Otherwise, the tableName might be a column family;
     * that case gets validated later by resolveColumn.
     */
    if (schemaName != null || tableName != null) {
        String resolvedTable = ref.getTable().getTableName().getString();
        String resolvedSchema = ref.getTable().getSchemaName().getString();
        if (schemaName != null && tableName != null) {
            boolean namesMatch =
                    schemaName.equals(resolvedSchema) && tableName.equals(resolvedTable);
            // A mismatch is still acceptable when the "schema" is really the alias.
            if (!namesMatch && !schemaName.equals(alias)) {
                throw new TableNotFoundException(schemaName, tableName);
            }
        }
    }
    return ref;
}
// Resolves a column-family reference, either by scanning every known table
// (when no table name is given) or by resolving the named table first and
// falling back to the global scan if that table is unknown.
// NOTE(review): in the scan branch, the last matching table wins -- the loop
// keeps overwriting theColumnFamilyRef; confirm whether ambiguity should be an
// error instead.
private ColumnFamilyRef resolveColumnFamily(String tableName, String cfName) throws SQLException {
    if (tableName == null) {
        ColumnFamilyRef theColumnFamilyRef = null;
        Iterator<TableRef> iterator = tables.iterator();
        while (iterator.hasNext()) {
            TableRef tableRef = iterator.next();
            try {
                PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName);
                // NOTE(review): getColumnFamily presumably throws
                // ColumnFamilyNotFoundException rather than returning null, which
                // would make this null check dead code -- confirm against PTable.
                if (columnFamily == null) { throw new TableNotFoundException(cfName); }
                theColumnFamilyRef = new ColumnFamilyRef(tableRef, columnFamily);
            } catch (ColumnFamilyNotFoundException e) {} // ignored: try the next table
        }
        if (theColumnFamilyRef != null) { return theColumnFamilyRef; }
        // No table contained the family; report it using the family name.
        throw new TableNotFoundException(cfName);
    } else {
        TableRef tableRef = null;
        try {
            tableRef = resolveTable(null, tableName);
        } catch (TableNotFoundException e) {
            // "tableName" may itself have been a column-family name; retry the
            // no-table scan with the original cfName.
            return resolveColumnFamily(null, cfName);
        }
        // Here a missing family propagates ColumnFamilyNotFoundException to the caller.
        PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName);
        return new ColumnFamilyRef(tableRef, columnFamily);
    }
}
PTable table = PhoenixRuntime.getTable(metaConn, fullParentTableName); if (table==null) { throw new TableNotFoundException(fullParentTableName);
/**
 * Fetches the table's metadata, forcing a server round trip (bypassing the
 * client-side cache).
 *
 * @param conn JDBC connection (unwrapped to a PhoenixConnection)
 * @param name full table name ("schema.table")
 * @return the freshly fetched table metadata
 * @throws TableNotFoundException if the server does not report the table as existing
 * @throws SQLException on any other failure during the metadata call
 */
public static PTable getTableNoCache(Connection conn, String name) throws SQLException {
    final String schemaPart = SchemaUtil.getSchemaNameFromFullName(name);
    final String tablePart = SchemaUtil.getTableNameFromFullName(name);
    final PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);
    // "true" forces the cache to be refreshed from the server.
    MetaDataMutationResult result = new MetaDataClient(phoenixConn)
            .updateCache(phoenixConn.getTenantId(), schemaPart, tablePart, true);
    if (result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS) {
        return result.getTable();
    }
    throw new TableNotFoundException(schemaPart, tablePart);
}
/**
 * Loads one index table's metadata and appends it to the accumulator list.
 *
 * @param tenantId  tenant owning the index, or null for the global tenant
 * @param schemaName schema of the index
 * @param indexName  name of the index table to load
 * @param tableName  name of the data table (currently unused here)
 * @param clientTimeStamp timestamp bound for the metadata read
 * @param indexes    accumulator the loaded index is added to
 * @param clientVersion client protocol version forwarded to doGetTable
 * @param skipAddingParentColumns forwarded to doGetTable
 * @throws IOException via ServerUtil when the index cannot be found
 */
private void addIndexToTable(PName tenantId, PName schemaName, PName indexName, PName tableName, long clientTimeStamp, List<PTable> indexes, int clientVersion, boolean skipAddingParentColumns) throws IOException, SQLException {
    byte[] tenantIdBytes = (tenantId == null) ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes();
    PTable indexTable = doGetTable(tenantIdBytes, schemaName.getBytes(), indexName.getBytes(),
            clientTimeStamp, null, clientVersion, false, skipAddingParentColumns, null);
    if (indexTable != null) {
        indexes.add(indexTable);
        return;
    }
    // Index metadata is missing: surface it as an IOException wrapping a
    // TableNotFoundException (ServerUtil.throwIOException always throws).
    ServerUtil.throwIOException("Index not found",
            new TableNotFoundException(schemaName.getString(), indexName.getString()));
}
} catch (org.apache.hadoop.hbase.TableNotFoundException e) { String fullName = Bytes.toString(tableName); throw new TableNotFoundException(fullName); } catch (IOException e) { if (retryCount++ < maxRetryCount) { // One retry, in case split occurs while navigating