Example usage for io.netty.util CharsetUtil ISO_8859_1

List of usage examples for io.netty.util CharsetUtil ISO_8859_1

Introduction

In this page you can find the example usage for io.netty.util CharsetUtil ISO_8859_1.

Prototype

Charset ISO_8859_1

To view the source code for io.netty.util CharsetUtil ISO_8859_1, click the Source Link below.

Click Source Link

Document

ISO Latin Alphabet No. 1, also known as ISO-LATIN-1 (ISO-8859-1).

Usage

From source file:com.tesora.dve.sql.util.MysqlConnectionResource.java

License:Open Source License

/**
 * Stores the connection coordinates and configures the connection charset.
 * When useUTF8 is set, the wire encoding is UTF-8 and the session is switched
 * with "SET NAMES utf8"; otherwise ISO-8859-1 / latin1 is used.
 */
private void init(String url, String userName, String password, boolean useUTF8) throws Throwable {
    this.url = url;
    this.userName = userName;
    this.password = password;

    // Choose the Java-side charset and the matching MySQL session charset together.
    this.encoding = useUTF8 ? CharsetUtil.UTF_8 : CharsetUtil.ISO_8859_1;
    addPostConnectCmd(useUTF8 ? "SET NAMES utf8" : "SET NAMES latin1");

    mysqlConn = new MysqlConnection(new TestStorageSite());

    connect();
}

From source file:com.tesora.dve.tools.analyzer.sources.FileSource.java

License:Open Source License

/**
 * Parses one MySQL log file from the given stream, handing each parsed entry
 * to the supplied invoker. The stream is decoded as ISO-8859-1 — presumably
 * because that charset maps every byte 1:1 to a char; confirm with FileParser.
 */
private static void mysqlLogFile(InputStream is, ParserInvoker invoker) throws Throwable {
    final FileParser logParser = new FileParser();
    logParser.parseOneMysqlLogFile(is, invoker, CharsetUtil.ISO_8859_1);
}

From source file:com.tesora.dve.tools.libmy.AsyncExample.java

License:Open Source License

/**
 * Builds a minimal charset catalog that resolves only the two MySQL client
 * charset ids this example needs; every other id resolves to null.
 */
protected static JavaCharsetCatalog constructJavaCharsetCatalog() {
    return new JavaCharsetCatalog() {
        @Override
        public Charset findJavaCharsetById(int clientCharsetId) {
            switch (clientCharsetId) {
            case 33: // MySQL id 33 — presumably utf8_general_ci; verify against server tables
                return CharsetUtil.UTF_8;
            case 8: // MySQL id 8 — presumably latin1_swedish_ci; verify against server tables
                return CharsetUtil.ISO_8859_1;
            default:
                return null; // unknown id: caller must handle the miss
            }
        }
    };
}

From source file:com.tesora.dve.tools.logfile.DVELogFileUtility.java

License:Open Source License

/**
 * Concatenates PE log files onto a single output file, adjusting line numbers
 * as it goes so the combined log stays monotonically numbered.
 *
 * @param left  input file names, or directory names when {@code dir} is true
 * @param right output file name
 * @param dir   when true, each entry of {@code left} is a directory whose
 *              *.pelog children are concatenated instead
 * @throws IllegalArgumentException if a directory entry cannot be listed
 */
private static void concatenate(String[] left, String right, boolean dir) throws Throwable {
    // Collect the input files first: either each 'left' entry directly, or
    // every *.pelog file inside each named directory.
    ArrayList<File> infiles = new ArrayList<File>();
    for (String l : left) {
        if (dir) {
            File srcdir = new File(l);
            File[] children = srcdir.listFiles();
            // listFiles() returns null when the path is not a readable directory;
            // the original code would have thrown an opaque NPE here.
            if (children == null) {
                throw new IllegalArgumentException("Not a readable directory: " + l);
            }
            for (File f : children) {
                if (f.getName().endsWith(".pelog"))
                    infiles.add(f);
            }
        } else {
            infiles.add(new File(l));
        }
    }

    // try-with-resources guarantees the stream and writer are closed even if
    // parsing one of the inputs throws (the original leaked both on error).
    try (FileOutputStream fos = new FileOutputStream(right);
            PrintWriter out = new PrintWriter(new OutputStreamWriter(fos, CharsetUtil.ISO_8859_1), true)) {
        long offset = 0L;
        for (File f : infiles) {
            System.out.println("Concatenating " + f.getAbsolutePath() + " onto " + right);
            try (final FileInputStream fis = new FileInputStream(f)) {
                AdjustingParserInvoker api = new AdjustingParserInvoker(out, offset);
                new FileParser().parseOneFilePELog(fis, api);
                offset = api.getLastLine() + 1; // + 1 so the 0 line next time around is correct
            }
        }
    }
}

From source file:com.tesora.dve.worker.MysqlTextResultChunkProvider.java

License:Open Source License

/**
 * Consumes one text-protocol result row: builds both a plain string row
 * (added to the row data list) and a typed ResultRow (added to the chunk).
 * Rows arriving after the configured results limit are silently dropped.
 */
public void consumeRowText(MyTextResultRow textRow) {
    if (getRowDataList().size() >= getResultsLimit())
        return; // result set already full — drop this row

    ArrayList<String> row = new ArrayList<String>(getFieldCount());
    ResultRow resultRow = new ResultRow();
    String colValue;
    Object colValueObj;
    for (int i = 0; i < getFieldCount(); ++i) {
        // Column metadata is 1-based while the row accessors are 0-based.
        ColumnMetadata cMd = columnSet.getColumn(i + 1);
        if (cMd.isBinaryType()) {
            // Binary columns: keep the raw bytes as the typed value and decode
            // the string form as ISO-8859-1 (maps each byte 1:1 to a char).
            colValueObj = textRow.getBytes(i);
            colValue = textRow.getString(i, CharsetUtil.ISO_8859_1);
        } else {
            // Text columns arrive UTF-8 encoded; convert the string to the
            // Java object matching the column's native MySQL type.
            colValue = textRow.getString(i, CharsetUtil.UTF_8);
            try {
                colValueObj = colValue == null ? null
                        : DBTypeBasedUtils
                                .getMysqlTypeFunc(MyFieldType.fromByte((byte) cMd.getNativeTypeId()),
                                        cMd.getSize(), cMd.getNativeTypeFlags())
                                .convertStringToObject(colValue, cMd);
            } catch (PEException e) {
                // No converter for this native type id is a programming error.
                throw new PECodingException("Problem finding value conversion function", e);
            }
        }
        resultRow.addResultColumn(new ResultColumn(colValueObj));
        row.add(i, colValue);
    }
    getRowDataList().add(row);
    chunk.addResultRow(resultRow);
}

From source file:me.nithanim.cultures.format.cif.CifFileWriter.java

@Override
public void pack(CifFile o, ByteBuf buf) throws IOException {
    List<String> lines = o.getLines();

    ByteBufAllocator alloc = ByteBufAllocator.DEFAULT;
    ByteBuf indexTable = alloc.buffer(o.getLines().size()).order(ByteOrder.LITTLE_ENDIAN);
    ByteBuf contentTable = alloc.buffer(o.getLines().size() * 10).order(ByteOrder.LITTLE_ENDIAN);
    if (o.getFileFormat() == CifFile.FileFormat.CIF) {
        for (String l : lines) {
            int idx = contentTable.writerIndex();
            ByteBufUtil.hexDump(contentTable);
            indexTable.writeInt(idx);//from  ww  w.  jav a2s.  c om

            l = l.trim();
            if (l.startsWith("[")) {
                l = l.substring(1, l.length() - 1);
                contentTable.writeByte(1);
            } else {
                contentTable.writeByte(2);
            }

            contentTable.writeBytes(l.getBytes(CharsetUtil.ISO_8859_1));
            contentTable.writeByte('\0');
        }
    } else {
        for (String l : lines) {
            int idx = contentTable.writerIndex();
            indexTable.writeInt(idx);

            l = l.trim();

            contentTable.writeBytes(l.getBytes(CharsetUtil.ISO_8859_1));
            contentTable.writeByte('\0');
        }
    }
    EncryptedInformation ei = new EncryptedInformation(o.getLines().size(), indexTable.writerIndex(),
            indexTable, contentTable.writerIndex(), contentTable);

    Writer<EncryptedInformation> eiw;
    if (o.getInternalFormat() == CifFile.InternalFormat.TYPE1) {
        buf.writeInt(65601);
        eiw = type1Writer;
    } else if (o.getInternalFormat() == CifFile.InternalFormat.TYPE2) {
        buf.writeInt(1021);
        eiw = type2Writer;
    } else {
        throw new UnsupportedDataTypeException("The given data is not a cif file!");
    }
    eiw.pack(ei, buf);
}

From source file:org.apache.tajo.storage.TestSplitProcessor.java

License:Apache License

@Test
public void testFieldSplitProcessor() throws IOException {
    String data = "abc||de|";
    final ByteBuf buf = releaseLater(Unpooled.copiedBuffer(data, CharsetUtil.ISO_8859_1));

    final int len = buf.readableBytes();
    FieldSplitProcessor processor = new FieldSplitProcessor((byte) '|');

    assertEquals(3, buf.forEachByte(0, len, processor));
    assertEquals(4, buf.forEachByte(4, len - 4, processor));
    assertEquals(7, buf.forEachByte(5, len - 5, processor));
    assertEquals(-1, buf.forEachByte(8, len - 8, processor));
}

From source file:org.apache.tajo.storage.TestSplitProcessor.java

License:Apache License

@Test
public void testMultiCharFieldSplitProcessor1() throws IOException {
    // "abc||||de||" — the two-byte delimiter "||" completes at offsets 4, 6, 10.
    String data = "abc||||de||";
    final ByteBuf buf = releaseLater(Unpooled.copiedBuffer(data, CharsetUtil.ISO_8859_1));

    final int len = buf.readableBytes();
    // Pin the delimiter's encoding to the same charset as the buffer;
    // the bare getBytes() used the JVM's platform default charset.
    ByteBufProcessor processor = new MultiBytesFieldSplitProcessor("||".getBytes(CharsetUtil.ISO_8859_1));

    assertEquals(4, buf.forEachByte(0, len, processor));
    assertEquals(6, buf.forEachByte(5, len - 5, processor));
    assertEquals(10, buf.forEachByte(7, len - 7, processor));
    // No complete delimiter remains past the last one.
    assertEquals(-1, buf.forEachByte(11, len - 11, processor));
}

From source file:org.apache.tajo.storage.TestSplitProcessor.java

License:Apache License

@Test
public void testLineSplitProcessor() throws IOException {
    String data = "abc\r\n\n";
    final ByteBuf buf = releaseLater(Unpooled.copiedBuffer(data, CharsetUtil.ISO_8859_1));

    final int len = buf.readableBytes();
    LineSplitProcessor processor = new LineSplitProcessor();

    //find CR/*from  w  w  w.ja  va 2s.  c  o m*/
    assertEquals(3, buf.forEachByte(0, len, processor));

    // find CRLF
    assertEquals(4, buf.forEachByte(4, len - 4, processor));
    assertEquals(buf.getByte(4), '\n');
    // need to skip LF
    assertTrue(processor.isPrevCharCR());

    // find LF
    assertEquals(5, buf.forEachByte(5, len - 5, processor)); //line length is zero
}

From source file:org.graylog2.inputs.transports.netty.LenientDelimiterBasedFrameDecoderTest.java

License:Open Source License

@Test
public void testFailSlowTooLongFrameRecovery() throws Exception {
    EmbeddedChannel ch = new EmbeddedChannel(
            new LenientDelimiterBasedFrameDecoder(1, true, false, false, Delimiters.nulDelimiter()));

    for (int i = 0; i < 2; i++) {
        ch.writeInbound(Unpooled.wrappedBuffer(new byte[] { 1, 2 }));
        try {/*from   w ww .  j a va2 s .co  m*/
            assertTrue(ch.writeInbound(Unpooled.wrappedBuffer(new byte[] { 0 })));
            fail(DecoderException.class.getSimpleName() + " must be raised.");
        } catch (TooLongFrameException e) {
            // Expected
        }

        ch.writeInbound(Unpooled.wrappedBuffer(new byte[] { 'A', 0 }));
        ByteBuf buf = ch.readInbound();
        assertEquals("A", buf.toString(CharsetUtil.ISO_8859_1));

        buf.release();
    }
}