
Commit

BUG: Don't over-optimize memory with jagged CSV (pandas-dev#23527)
With jagged CSVs, we risk being too quick to dump memory that we still need to allocate, because previous chunks may have indicated much larger rows than we would otherwise anticipate from subsequent chunks.

Closes pandas-dev#23509.
gfyoung authored and Pingviinituutti committed Feb 28, 2019
1 parent 7c8bf8d commit 8583846
Showing 5 changed files with 50 additions and 2 deletions.
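
The user-facing regression is easiest to see with a chunked read over a jagged file. The snippet below is a minimal standalone sketch of that scenario; the data layout, names=range(10), and chunksize=4 mirror the test added in pandas/tests/io/parser/common.py further down, and pd.testing.assert_frame_equal is simply the public counterpart of the tm.assert_frame_equal helper used there.

from io import StringIO

import numpy as np
import pandas as pd

# Seven 1-column rows followed by one 10-column row, so the chunks read early
# on are far narrower than the final one.
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])

# Before this fix, concatenating the chunks risked a buffer overflow in the C
# tokenizer; afterwards it returns an 8x10 frame padded with NaN.
reader = pd.read_csv(StringIO(data), names=range(10), chunksize=4)
result = pd.concat(reader)

expected = pd.DataFrame({0: [0] * 8})
for i in range(1, 10):
    expected[i] = [np.nan] * 7 + [0]

pd.testing.assert_frame_equal(result, expected)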
1 change: 1 addition & 0 deletions doc/source/whatsnew/v0.24.0.txt
@@ -1299,6 +1299,7 @@ Notice how we now instead output ``np.nan`` itself instead of a stringified form
- :func:`read_excel()` will correctly show the deprecation warning for previously deprecated ``sheetname`` (:issue:`17994`)
- :func:`read_csv()` and :func:`read_table()` will throw ``UnicodeError`` and not coredump on badly encoded strings (:issue:`22748`)
- :func:`read_csv()` will correctly parse timezone-aware datetimes (:issue:`22256`)
- Bug in :func:`read_csv()` in which memory management was prematurely optimized for the C engine when the data was being read in chunks (:issue:`23509`)
- :func:`read_sas()` will parse numbers in sas7bdat-files that have width less than 8 bytes correctly. (:issue:`21616`)
- :func:`read_sas()` will correctly parse sas7bdat files with many columns (:issue:`22628`)
- :func:`read_sas()` will correctly parse sas7bdat files with data page types having also bit 7 set (so page type is 128 + 256 = 384) (:issue:`16615`)
1 change: 1 addition & 0 deletions pandas/_libs/parsers.pyx
@@ -132,6 +132,7 @@ cdef extern from "parser/tokenizer.h":
int64_t *word_starts # where we are in the stream
int64_t words_len
int64_t words_cap
int64_t max_words_cap # maximum word cap encountered

char *pword_start # pointer to stream start of current field
int64_t word_start # position start of current field
33 changes: 31 additions & 2 deletions pandas/_libs/src/parser/tokenizer.c
@@ -197,6 +197,7 @@ int parser_init(parser_t *self) {
sz = sz ? sz : 1;
self->words = (char **)malloc(sz * sizeof(char *));
self->word_starts = (int64_t *)malloc(sz * sizeof(int64_t));
self->max_words_cap = sz;
self->words_cap = sz;
self->words_len = 0;

@@ -247,7 +248,7 @@ void parser_del(parser_t *self) {
}

static int make_stream_space(parser_t *self, size_t nbytes) {
int64_t i, cap;
int64_t i, cap, length;
int status;
void *orig_ptr, *newptr;

@@ -287,8 +288,23 @@ static int make_stream_space(parser_t *self, size_t nbytes) {
*/

cap = self->words_cap;

/**
* If we are reading in chunks, we need to be aware of the maximum number
* of words we have seen in previous chunks (self->max_words_cap), so
* that way, we can properly allocate when reading subsequent ones.
*
* Otherwise, we risk a buffer overflow if we mistakenly under-allocate
* just because a recent chunk did not have as many words.
*/
if (self->words_len + nbytes < self->max_words_cap) {
length = self->max_words_cap - nbytes;
} else {
length = self->words_len;
}

self->words =
(char **)grow_buffer((void *)self->words, self->words_len,
(char **)grow_buffer((void *)self->words, length,
(int64_t*)&self->words_cap, nbytes,
sizeof(char *), &status);
TRACE(
@@ -1241,6 +1257,19 @@ int parser_trim_buffers(parser_t *self) {

int64_t i;

/**
* Before we free up space and trim, we should
* save how many words we saw when parsing, if
* it exceeds the maximum number we saw before.
*
* This is important for when we read in chunks,
* so that we can inform subsequent chunk parsing
* as to how many words we could possibly see.
*/
if (self->words_cap > self->max_words_cap) {
self->max_words_cap = self->words_cap;
}

/* trim words, word_starts */
new_cap = _next_pow2(self->words_len) + 1;
if (new_cap < self->words_cap) {
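
The interplay between the make_stream_space and parser_trim_buffers hunks above is easier to follow outside of C. Below is a hypothetical, much-simplified Python model of the bookkeeping, not the actual implementation: the class, the helper names, and the plain doubling policy are illustrative stand-ins for grow_buffer and _next_pow2. The point it demonstrates is that parser_trim_buffers records the peak capacity before shrinking, and make_stream_space then sizes for that peak rather than for the most recent chunk alone.

def grow(cap, needed):
    # Double the capacity until it covers `needed`, mimicking grow_buffer.
    while cap < needed:
        cap *= 2
    return cap


class WordBufferModel:
    # Toy stand-in for the words/word_starts bookkeeping in parser_t.
    def __init__(self):
        self.words_cap = 1      # current capacity of the word-pointer buffer
        self.max_words_cap = 1  # largest capacity reached so far (the new field)

    def make_stream_space(self, n_new_words):
        # Size for the historical maximum, not just what this chunk needs.
        needed = max(n_new_words, self.max_words_cap)
        self.words_cap = grow(self.words_cap, needed)

    def trim_buffers(self, words_len):
        # Record the peak before shrinking back down for the next chunk.
        self.max_words_cap = max(self.max_words_cap, self.words_cap)
        self.words_cap = max(words_len, 1)


buf = WordBufferModel()
buf.make_stream_space(40)  # a wide chunk forces the buffer to grow
buf.trim_buffers(4)        # trimming after a narrow chunk shrinks it again
buf.make_stream_space(4)   # next chunk: capacity snaps back to the former peak
assert buf.words_cap >= buf.max_words_cap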
1 change: 1 addition & 0 deletions pandas/_libs/src/parser/tokenizer.h
@@ -142,6 +142,7 @@ typedef struct parser_t {
int64_t *word_starts; // where we are in the stream
int64_t words_len;
int64_t words_cap;
int64_t max_words_cap; // maximum word cap encountered

char *pword_start; // pointer to stream start of current field
int64_t word_start; // position start of current field
16 changes: 16 additions & 0 deletions pandas/tests/io/parser/common.py
@@ -458,6 +458,22 @@ def test_read_chunksize_generated_index(self):

        tm.assert_frame_equal(pd.concat(reader), df)

    def test_read_chunksize_jagged_names(self):
        # see gh-23509
        # Seven 1-column rows followed by one 10-column row: later chunks are
        # much wider than the earlier ones that the parser trims buffers on.
        data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
        reader = self.read_csv(StringIO(data), names=range(10), chunksize=4)

        expected = DataFrame()

        for i in range(10):
            if i == 0:
                expected[i] = [0] * 8
            else:
                expected[i] = [np.nan] * 7 + [0]

        result = pd.concat(reader)
        tm.assert_frame_equal(result, expected)

    def test_read_text_list(self):
        data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
        as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
