List of usage examples for java.lang.ArrayIndexOutOfBoundsException: the no-argument constructor ArrayIndexOutOfBoundsException()
public ArrayIndexOutOfBoundsException()
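Before the per-project examples below, here is a minimal sketch of how this no-argument constructor is typically used: a manual bounds check throws the exception (with no detail message) and a caller catches it. The class, method, and array names here are illustrative only and do not come from any of the projects listed.

// A minimal sketch; all names are hypothetical, not from the sources below.
public class BoundsCheckExample {

    // Returns the element at 'index', throwing the no-arg
    // ArrayIndexOutOfBoundsException when the index is invalid.
    static String elementAt(String[] values, int index) {
        if (index < 0 || index >= values.length) {
            // No detail message is attached when this constructor is used.
            throw new ArrayIndexOutOfBoundsException();
        }
        return values[index];
    }

    public static void main(String[] args) {
        String[] values = { "a", "b", "c" };
        try {
            System.out.println(elementAt(values, 5));
        } catch (ArrayIndexOutOfBoundsException e) {
            // getMessage() is null here because the no-arg constructor was used.
            System.out.println("Index out of range: " + e);
        }
    }
}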
From source file:org.apache.axis2.format.WrappedTextNodeStreamReader.java
public boolean isAttributeSpecified(int index) {
    checkStartElement();
    throw new ArrayIndexOutOfBoundsException();
}
From source file:org.apache.hadoop.io.compress.zlib.ZlibCompressor.java
@Override
public int compress(byte[] b, int off, int len) throws IOException {
    if (b == null) {
        throw new NullPointerException();
    }
    if (off < 0 || len < 0 || off > b.length - len) {
        throw new ArrayIndexOutOfBoundsException();
    }

    int n = 0;

    // Check if there is compressed data
    n = compressedDirectBuf.remaining();
    if (n > 0) {
        n = Math.min(n, len);
        ((ByteBuffer) compressedDirectBuf).get(b, off, n);
        return n;
    }

    // Re-initialize the zlib's output direct buffer
    compressedDirectBuf.rewind();
    compressedDirectBuf.limit(directBufferSize);

    // Compress data
    n = deflateBytesDirect();
    compressedDirectBuf.limit(n);

    // Check if zlib consumed all input buffer
    // set keepUncompressedBuf properly
    if (uncompressedDirectBufLen <= 0) { // zlib consumed all input buffer
        keepUncompressedBuf = false;
        uncompressedDirectBuf.clear();
        uncompressedDirectBufOff = 0;
        uncompressedDirectBufLen = 0;
    } else { // zlib did not consume all input buffer
        keepUncompressedBuf = true;
    }

    // Get at most 'len' bytes
    n = Math.min(n, len);
    ((ByteBuffer) compressedDirectBuf).get(b, off, n);
    return n;
}
From source file:at.lame.hellonzb.parser.NzbParser.java
/**
 * Remove the DownloadFile object at the given vector's index.
 *
 * @param index The download file to remove is identified by this index value
 * @throws ArrayIndexOutOfBoundsException
 */
public synchronized void removeFileAt(int index) throws ArrayIndexOutOfBoundsException {
    if (index < 0 || index > (downloadFiles.size() - 1))
        throw new ArrayIndexOutOfBoundsException();

    downloadFiles.remove(index);
}
From source file:org.lockss.hasher.SimpleHasher.java
/**
 * Handles the specification of the type of hashing operation to be performed.
 *
 * @param params
 *          A HasherParams with the parameters that define the hashing operation.
 * @param result
 *          A HasherResult where to store the result of the hashing operation.
 * @return a String with any error message.
 */
public String processHashTypeParam(HasherParams params, HasherResult result) {
    final String DEBUG_HEADER = "processHashTypeParam(): ";
    if (log.isDebug2())
        log.debug2(DEBUG_HEADER + "Starting...");

    HashType hashType = null;
    String errorMessage = null;

    if (StringUtil.isNullString(params.getHashType())) {
        hashType = DEFAULT_HASH_TYPE;
    } else if (StringUtils.isNumeric(params.getHashType())) {
        try {
            int hashTypeInt = Integer.parseInt(params.getHashType());
            hashType = hashTypeCompat[hashTypeInt];
            if (hashType == null)
                throw new ArrayIndexOutOfBoundsException();
            params.setHashType(hashType.toString());
        } catch (ArrayIndexOutOfBoundsException aioobe) {
            result.setRunnerStatus(HasherStatus.Error);
            errorMessage = "Unknown hash type: " + params.getHashType();
            result.setRunnerError(errorMessage);
            return errorMessage;
        } catch (RuntimeException re) {
            result.setRunnerStatus(HasherStatus.Error);
            errorMessage = "Can't parse hash type: " + params.getHashType() + re.getMessage();
            result.setRunnerError(errorMessage);
            return errorMessage;
        }
    } else {
        try {
            hashType = HashType.valueOf(params.getHashType());
        } catch (IllegalArgumentException iae) {
            log.warning(DEBUG_HEADER, iae);
            result.setRunnerStatus(HasherStatus.Error);
            errorMessage = "Unknown hash type: " + params.getHashType() + " - " + iae.getMessage();
            result.setRunnerError(errorMessage);
            return errorMessage;
        }
    }

    result.setHashType(hashType);
    return errorMessage;
}
From source file:LZFInputStream.java
public void expand(byte[] in, int inPos, int inLen, byte[] out, int outPos, int outLen) {
    if (inPos < 0 || outPos < 0 || outLen < 0) {
        throw new IllegalArgumentException();
    }
    do {
        int ctrl = in[inPos++] & 255;
        if (ctrl < MAX_LITERAL) {
            // literal run of length = ctrl + 1,
            ctrl++;
            // copy to output and move forward this many bytes
            System.arraycopy(in, inPos, out, outPos, ctrl);
            outPos += ctrl;
            inPos += ctrl;
        } else {
            // back reference
            // the highest 3 bits are the match length
            int len = ctrl >> 5;
            // if the length is maxed, add the next byte to the length
            if (len == 7) {
                len += in[inPos++] & 255;
            }
            // minimum back-reference is 3 bytes,
            // so 2 was subtracted before storing size
            len += 2;
            // ctrl is now the offset for a back-reference...
            // the logical AND operation removes the length bits
            ctrl = -((ctrl & 0x1f) << 8) - 1;
            // the next byte augments/increases the offset
            ctrl -= in[inPos++] & 255;
            // copy the back-reference bytes from the given
            // location in output to current position
            ctrl += outPos;
            if (outPos + len >= out.length) {
                // reduce array bounds checking
                throw new ArrayIndexOutOfBoundsException();
            }
            for (int i = 0; i < len; i++) {
                out[outPos++] = out[ctrl++];
            }
        }
    } while (outPos < outLen);
}
From source file:org.apache.hadoop.mapreduce.task.reduce.TestFetcher.java
@SuppressWarnings("unchecked") @Test(timeout = 10000)/* ww w. j av a2 s . c om*/ public void testCopyFromHostOnAnyException() throws Exception { InMemoryMapOutput<Text, Text> immo = mock(InMemoryMapOutput.class); Fetcher<Text, Text> underTest = new FakeFetcher<Text, Text>(job, id, ss, mm, r, metrics, except, key, connection); String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key); when(connection.getResponseCode()).thenReturn(200); when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash); ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1); ByteArrayOutputStream bout = new ByteArrayOutputStream(); header.write(new DataOutputStream(bout)); ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray()); when(connection.getInputStream()).thenReturn(in); when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME)) .thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME); when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION)) .thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION); when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt())).thenReturn(immo); doThrow(new ArrayIndexOutOfBoundsException()).when(immo).shuffle(any(MapHost.class), any(InputStream.class), anyLong(), anyLong(), any(ShuffleClientMetrics.class), any(Reporter.class)); underTest.copyFromHost(host); verify(connection).addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash); verify(ss, times(1)).copyFailed(map1ID, host, true, false); }
From source file:org.springframework.ldap.core.DistinguishedName.java
public Name getSuffix(int index) {
    if (index > names.size()) {
        throw new ArrayIndexOutOfBoundsException();
    }

    LinkedList newNames = new LinkedList();
    for (int i = index; i < names.size(); i++) {
        newNames.add(names.get(i));
    }

    return new DistinguishedName(newNames);
}
From source file:com.wikipy.utils.StringUtils.java
public static String[] subArray(String[] array, int fromIndex) {
    if (fromIndex > array.length - 1) {
        throw new ArrayIndexOutOfBoundsException();
    }

    String[] newArr = new String[array.length - fromIndex];
    System.arraycopy(array, fromIndex, newArr, 0, newArr.length);
    return newArr;
}
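A short usage sketch for the method above; the calling code and array contents are hypothetical, shown only to illustrate when the no-argument constructor fires.

// Hypothetical caller of StringUtils.subArray (not from the original project).
String[] parts = { "one", "two", "three", "four" };
String[] tail = StringUtils.subArray(parts, 2);   // yields { "three", "four" }

try {
    StringUtils.subArray(parts, 4);               // fromIndex > array.length - 1
} catch (ArrayIndexOutOfBoundsException e) {
    // thrown with no detail message by the no-arg constructor above
}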
From source file:it.sayservice.platform.smartplanner.otp.OTPHandler.java
public Map<String, WeekdayFilter> readAgencyWeekDay(String router, String agencyId) throws IOException { Map<String, WeekdayFilter> entries = new TreeMap<String, WeekdayFilter>(); List<String[]> lines = null; try {// w w w .java 2 s .c o m lines = readCSV(System.getenv("OTP_HOME") + System.getProperty("file.separator") + router + System.getProperty("file.separator") + Constants.SCHEDULES_FOLDER_PATH + System.getProperty("file.separator") + agencyId + System.getProperty("file.separator") + Constants.GTFS_CALENDAR); if (lines.size() <= 1) { // in case of empty calendar.txt file with just header throw exception throw new ArrayIndexOutOfBoundsException(); } } catch (Exception e) { // if calendar.txt is missing, construct entries using calendar_dates.txt List<String[]> linesEx = readCSV(System.getenv("OTP_HOME") + System.getProperty("file.separator") + router + System.getProperty("file.separator") + Constants.SCHEDULES_FOLDER_PATH + System.getProperty("file.separator") + agencyId + System.getProperty("file.separator") + Constants.GTFS_CALENDAR_DATE); Map<String, String> serviceStartDate = new HashMap<String, String>(); Map<String, String> serviceEndDate = new HashMap<String, String>(); // service_id,date,exception_type int serviceIdIndex = getFieldIndex(Constants.CDATES_SERVICE_ID, linesEx.get(0)); int dateIndex = getFieldIndex(Constants.CDATES_DATE, linesEx.get(0)); // int exceptionTypeIndex = getFieldIndex(Constants.CDATES_EXCEPTION_TYPE, lines.get(0)); boolean b[] = new boolean[7]; for (int i = 1; i < linesEx.size(); i++) { String[] words = linesEx.get(i); try { String name = agencyId + "_" + words[serviceIdIndex].trim(); // new String date = words[dateIndex].trim(); if (!serviceStartDate.containsKey(name)) { serviceStartDate.put(name, date); } else { serviceEndDate.put(name, date); } } catch (Exception e1) { System.out.println("Error parsing weekdays exception"); e1.printStackTrace(); } } for (String key : serviceStartDate.keySet()) { String name = key; String startDate = serviceStartDate.get(name); String endDate = null; if (serviceEndDate.containsKey(name)) { endDate = serviceEndDate.get(name); } else { endDate = startDate; } WeekdayFilter wdf = new WeekdayFilter(); wdf.setName(name); wdf.setDays(b); wdf.setFromDate(startDate); wdf.setToDate(endDate); entries.put(name, wdf); } } if (lines != null) { // service_id,date,exception_type int serviceIdIndex = getFieldIndex(Constants.CAL_SERVICE_ID, lines.get(0)); // int monIndex = getFieldIndex(Constants.CAL_MON, lines.get(0)); // int tueIndex = getFieldIndex(Constants.CAL_TUE, lines.get(0)); // int wedIndex = getFieldIndex(Constants.CAL_WED, lines.get(0)); // int thrIndex = getFieldIndex(Constants.CAL_THR, lines.get(0)); // int friIndex = getFieldIndex(Constants.CAL_FRI, lines.get(0)); // int satIndex = getFieldIndex(Constants.CAL_SAT, lines.get(0)); // int sunIndex = getFieldIndex(Constants.CAL_SUN, lines.get(0)); int startDateIndex = getFieldIndex(Constants.CAL_START_DATE, lines.get(0)); int endDateIndex = getFieldIndex(Constants.CAL_END_DATE, lines.get(0)); for (int i = 1; i < lines.size(); i++) { String[] words = lines.get(i); try { String name = agencyId + "_" + words[serviceIdIndex]; // new boolean b[] = new boolean[7]; for (int d = 1; d < 8; d++) { b[d - 1] = words[d].equals("1") ? 
true : false; } String startDate = words[startDateIndex].trim(); //8 String endDate = words[endDateIndex].trim(); //9 WeekdayFilter wdf = new WeekdayFilter(); wdf.setName(name); wdf.setDays(b); wdf.setFromDate(startDate); wdf.setToDate(endDate); entries.put(name, wdf); } catch (Exception e) { System.out.println("Error parsing weekdays filter"); e.printStackTrace(); } } } return entries; }