
row not consolidating duplicates in R when using multiple months in Date Filter

I am using the following code to summarize my data by the CUST_ID column:

library(data.table, warn.conflicts = FALSE)
library(lubridate, warn.conflicts = FALSE)

################
## PARAMETERS ##
################

# Set path of major source folder for raw transaction data
in_directory <- "C:/Users/NAME/Documents/Raw Data/"

# List names of sub-folders (currently grouped by first two characters of CUST_ID)
in_subfolders <- list("AA-CA", "CB-HZ")

# Set location for output
out_directory <- "C:/Users/NAME/Documents/YTD Master/"
out_filename <- "OUTPUT.csv"

# Set beginning and end of date range to be collected - year-month-day format
date_range <- interval(as.Date("2017-01-01"), as.Date("2017-01-31"))

# Enable or disable filtering of raw files to only grab items bought within certain months to save space.
# If false, all files will be scanned for unique items, which will take longer and be a larger file.
date_filter <- TRUE


##########
## CODE ##
##########

starttime <- Sys.time()
mastertable <- NULL

for (j in 1:length(in_subfolders)) {
  subfolder <- in_subfolders[j]
  sub_directory <- paste0(in_directory, subfolder, "/")

  ## IMPORT DATA
  in_filenames <- dir(sub_directory, pattern =".txt")

  for (i in 1:length(in_filenames)) {

    # Default value provided for when fast filtering is disabled.
    read_this_file <- TRUE

    # To fast filter the data, we choose to include or exclude an entire file based on the date of its first line.
    # WARNING: This is only a valid method if filtering by entire months, since that is the amount of data housed in each file.
    if (date_filter) {
      temptable <- fread(paste0(sub_directory, in_filenames[i]), colClasses=c(CUSTOMER_TIER = "character"),
                         na.strings = "", nrows = 1)
      temptable[, INVOICE_DT := as.Date(INVOICE_DT)]

      # If date matches, set read flag to TRUE.  If date does not match, set read flag to FALSE.
      read_this_file <- temptable[, INVOICE_DT] %within% date_range
    }


    if (read_this_file) {
      print(Sys.time()-starttime)
      print(paste0("Reading in ", in_filenames[i]))
      temptable <- fread(paste0(sub_directory, in_filenames[i]), colClasses = c(CUSTOMER_TIER = "character"),
                         na.strings = "")


      temptable <- temptable[,lapply(.SD, sum), by = .(CUST_ID),
                                         .SDcols = c("Ext Sale")]

      # Combine into full list
      mastertable <- rbindlist(list(mastertable, temptable), use.names = TRUE)
      # Release unneeded memory
      rm(temptable)

    }

  }

}

# Save Final table
print("Saving master table")
fwrite(mastertable, paste0(out_directory, out_filename))
rm(mastertable)

print(Sys.time()-starttime)

The output I receive after running the above script for the month of January is shown below, and this is the output I expect:

CUST_ID Ext Sale
AK0010001   209.97
CO0020001   1540.3

The problem arises when I use multiple months. Below is the output I receive when I run January through February with date_range <- interval(as.Date("2017-01-01"), as.Date("2017-02-28")):

CUST_ID Ext Sale
AK0010001   209.97
AK0010001   217.833
CO0020001   1540.3
CO0010001   -179.765

As you can see in the output above, CUST_ID is no longer consolidating.

Does anyone know why this would be happening?

Below I have provided some data to reproduce what I am working with. Just save the data as 4 separate text files in the folder structure used in my code.

I have 2 separate folders saved as "AA-CA" and "CB-HZ".

File 1 saved as "AA-CA 2017-01.txt"

INVOICE_DT,BRANCH_CODE,INVOICE_NO,INV_SEQ_NO,INV_ITEM_ID,ITEM_DESCR,STD_ITEM,PRIVATE_LABEL,CATEGORY_PATH1,CATEGORY_PATH2,CUST_ID,CUSTOMER_TIER,IS_VENDING,SALE_PRICE,TOTAL_COST,POS_COST,CE100,CE110,CE120,CE200,CORP_PRICE,QTY_SOLD,PACKSLIP_WHSL,PRICING_GROUP,PGG_MIN_PRICE,PGY_MIN_PRICE,PGR_MIN_PRICE,Ext Sale,Ext Total Cost
2017-01-27,AK001,AK0016997,4,12772-00079,"3.75"""""""" 4.12"""""""" HOSE OD",N,N,08.5-Fleet & Automotive,01.6-DOT Hose & Tubing,AK0010001,Tier 3,No,42.74,22.438335,22.438335,21.37,,,0,,3,,PGR,168.2875125,134.63001,112.191675,128.22,67.315005
2017-01-27,AK001,AK0016997,3,12772-00022,"2.5"""""""" 2.87"""""""" HOSE OD C",N,N,08-Hydraulics & Pneumatics,02-Hose and Hose Reels,AK0010001,Tier 3,No,27.25,14.143396,14.143396,13.47,,,0,,3,,PGR,106.07547,84.860376,70.71698,81.75,42.430188

File 2 saved as "AA-CA 2017-02.txt"

INVOICE_DT,BRANCH_CODE,INVOICE_NO,INV_SEQ_NO,INV_ITEM_ID,ITEM_DESCR,STD_ITEM,PRIVATE_LABEL,CATEGORY_PATH1,CATEGORY_PATH2,CUST_ID,CUSTOMER_TIER,IS_VENDING,SALE_PRICE,TOTAL_COST,POS_COST,CE100,CE110,CE120,CE200,CORP_PRICE,QTY_SOLD,PACKSLIP_WHSL,PRICING_GROUP,PGG_MIN_PRICE,PGY_MIN_PRICE,PGR_MIN_PRICE,Ext Sale,Ext Total Cost
2017-02-28,AK001,AK0017107,1,12772-00307,3-WAY MALE HOUSING,N,N,09-Electrical,05.5-Terminals and Wire Connectors,AK0010001,Tier 3,No,95.21,74.591453,74.591453,71.04,,,0,,1,,PGG,0,0,0,95.21,74.591453
2017-02-28,AK001,AK0017105,3,99523968,PC58570 1/2 PRS BALL,Y,N,,,AK0010001,Tier 3,No,24.5246,12.356039,12.356039,11.767743,,,0,,5,,PGG,0,0,0,122.623,61.780195

File 3 saved as "CB-HZ 2017-01.txt"

INVOICE_DT,BRANCH_CODE,INVOICE_NO,INV_SEQ_NO,INV_ITEM_ID,ITEM_DESCR,STD_ITEM,PRIVATE_LABEL,CATEGORY_PATH1,CATEGORY_PATH2,CUST_ID,CUSTOMER_TIER,IS_VENDING,SALE_PRICE,TOTAL_COST,POS_COST,CE100,CE110,CE120,CE200,CORP_PRICE,QTY_SOLD,PACKSLIP_WHSL,PRICING_GROUP,PGG_MIN_PRICE,PGY_MIN_PRICE,PGR_MIN_PRICE,Ext Sale,Ext Total Cost
2017-01-31,CO002,CO0023603,19,13117-00095,8-32X5/16 BHSCS MAG,N,N,18-Work Order Parts,Finished Products,CO0020001,Tier 3,No,0.1858,0.037528,0.037528,0.01833,,,0,,6000,,PGG,0,0,0,1114.8,225.168
2017-01-31,CO002,CO0023603,20,13117-00186,"#8-16X3/4"""""""" 6-LOBE PA",N,N,01-Fasteners,03-Screws,CO0020001,Tier 3,No,0.0851,0.029652,0.029652,,,,0,,5000,,PGG,0,0,0,425.5,148.26

File 4 saved as "CB-HZ 2017-02.txt"

INVOICE_DT,BRANCH_CODE,INVOICE_NO,INV_SEQ_NO,INV_ITEM_ID,ITEM_DESCR,STD_ITEM,PRIVATE_LABEL,CATEGORY_PATH1,CATEGORY_PATH2,CUST_ID,CUSTOMER_TIER,IS_VENDING,SALE_PRICE,TOTAL_COST,POS_COST,CE100,CE110,CE120,CE200,CORP_PRICE,QTY_SOLD,PACKSLIP_WHSL,PRICING_GROUP,PGG_MIN_PRICE,PGY_MIN_PRICE,PGR_MIN_PRICE,Ext Sale,Ext Total Cost
2017-02-03,CO001,CO0019017,1,MN2550000A20000,M6-1.0 HEX NUT A-2,Y,N,01-Fasteners,04-Nuts,CO0010001,NA,No,0.0313,0.00767,0.00767,0.006215,0.000593,,0.001241,,-50,0.1058,,,,,-1.565,-0.3835
2017-02-16,CO001,CO0019018,1,11516769,RS37518BlkRndSpacer,Y,N,01.5-Hardware,Electronic Hardware,CO0010001,NA,No,0.0396,0.011245,0.011245,0.01071,,,0,,-4500,0.0543,,,,,-178.2,-50.6025

I have the data saved in 2 separate folders.
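
So the layout under in_directory should look roughly like this (a sketch based on the folder and file names above):

Raw Data/
    AA-CA/
        AA-CA 2017-01.txt
        AA-CA 2017-02.txt
    CB-HZ/
        CB-HZ 2017-01.txt
        CB-HZ 2017-02.txt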


1 Reply


The OP is wondering why the result is not consolidated for CUST_ID if more than one month of data is processed.

The reason is that the monthly files are read in and aggregated one by one, but a final aggregation step is needed to consolidate across all months.
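
To see the effect in isolation, here is a minimal sketch that rebuilds the two per-month aggregates for customer AK0010001 by hand, using the figures from the question's output:

library(data.table)

# hypothetical per-month aggregates, as produced by the monthly loop
jan <- data.table(CUST_ID = "AK0010001", `Ext Sale` = 209.97)
feb <- data.table(CUST_ID = "AK0010001", `Ext Sale` = 217.833)

# rbindlist() only stacks the monthly results, so the same CUST_ID
# appears once per month ...
combined <- rbindlist(list(jan, feb))

# ... and a second aggregation over the combined table is what
# consolidates them into a single row (209.97 + 217.833 = 427.803)
combined[, lapply(.SD, sum), by = .(CUST_ID), .SDcols = c("Ext Sale")]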

The code below is a simplified replacement of the double for loops. I have left out the "fast filtering" test code.

The first part creates a list of files to be processed. The second part does the processing.

# create vector of filenames to be processed
in_filenames <- list.files(
  file.path(in_directory, in_subfolders), 
  pattern = "\\.txt$",
  full.names = TRUE, 
  recursive = TRUE)

# read and aggregate each file separately
mastertable <- rbindlist(
  lapply(in_filenames, function(fn) {
    # code for "fast filter" test goes here
    message("Processing file: ", fn)
    temptable <- fread(fn,
                       colClasses = c(CUSTOMER_TIER = "character"),
                       na.strings = "")
    # aggregate 
    temptable[, lapply(.SD, sum), by = .(CUST_ID), .SDcols = c("Ext Sale")]
  })
)[
  # THIS IS THE MISSING STEP:
  # second aggregation for overall totals
  , lapply(.SD, sum), by = .(CUST_ID), .SDcols = c("Ext Sale")]

When run on the four sample files, the console shows:

Processing file: Raw Data/AA-CA/AA-CA 2017-01.txt
Processing file: Raw Data/AA-CA/AA-CA 2017-02.txt
Processing file: Raw Data/CB-HZ/CB-HZ 2017-01.txt
Processing file: Raw Data/CB-HZ/CB-HZ 2017-02.txt

and the consolidated result is:

mastertable
     CUST_ID Ext Sale
1: AK0010001  427.803
2: CO0020001 1540.300
3: CO0010001 -179.765

Note that chaining of data.table expressions is used here.
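
For readers unfamiliar with the idiom: chaining means applying a second [] directly to the result of the first, instead of storing an intermediate variable. A toy sketch with hypothetical data:

library(data.table)

DT <- data.table(g = c("a", "a", "b", "b"), x = 1:4)

# unchained: store the grouped sum, then subset it
agg <- DT[, .(x = sum(x)), by = g]
agg[x > 3]

# chained: the subset is applied directly to the grouped result,
# which is exactly the pattern rbindlist(...)[...] follows above
DT[, .(x = sum(x)), by = g][x > 3]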


Edit 1:

At the request of the OP, here is the complete code (except for the "fast filtering" part). A few lines were modified; they are marked with ### MODIFIED.

library(data.table, warn.conflicts = FALSE)
library(lubridate, warn.conflicts = FALSE)

################
## PARAMETERS ##
################

# Set path of major source folder for raw transaction data
in_directory <- "Raw Data"   ### MODIFIED

# List names of sub-folders (currently grouped by first two characters of CUST_ID)
in_subfolders <- list("AA-CA", "CB-HZ")

# Set location for output
out_directory <- "YTD Master"   ### MODIFIED
out_filename <- "OUTPUT.csv"

# Set beginning and end of date range to be collected - year-month-day format
date_range <- interval(as.Date("2017-01-01"), as.Date("2017-02-28"))   ### MODIFIED

# Enable or disable filtering of raw files to only grab items bought within certain months to save space.
# If false, all files will be scanned for unique items, which will take longer and be a larger file.
date_filter <- TRUE


##########
## CODE ##
##########

starttime <- Sys.time()

# create vector of filenames to be processed
in_filenames <- list.files(
  file.path(in_directory, in_subfolders), 
  pattern = "\\.txt$",
  full.names = TRUE, 
  recursive = TRUE)

# read and aggregate each file separately
mastertable <- rbindlist(
  lapply(in_filenames, function(fn) {
    # code for fast filter test goes here
    message("Processing file: ", fn)
    temptable <- fread(fn,
                       colClasses = c(CUSTOMER_TIER = "character"),
                       na.strings = "")
    # aggregate by month
    temptable[, lapply(.SD, sum), by = .(CUST_ID), .SDcols = c("Ext Sale")]
  })
)[
  # second aggregation overall
  , lapply(.SD, sum), by = .(CUST_ID), .SDcols = c("Ext Sale")]

# Save Final table
print("Saving master table")
fwrite(mastertable, file.path(out_directory, out_filename))   ### MODIFIED
# rm(mastertable)   ### MODIFIED

print(Sys.time()-starttime)

Edit 2:

The OP has asked me to include the "fast filter" code, which I had omitted for brevity.

However, I have a different approach: instead of reading the first line of each file to check whether its INVOICE_DT lies within the given date_range, it filters the file names, which contain the year-month in ISO 8601 format.

So, a vector of allowed year-month strings is constructed from the given date_range. Only those file names which contain one of the allowed year-month strings are selected for further processing.
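
As a quick sketch, for the date_range used below the allowed year-month prefixes come out as:

date_range <- c("2017-01-01", "2017-02-14")
format(seq(as.Date(date_range[1]), as.Date(date_range[2]), by = "1 month"), "%Y-%m")
# [1] "2017-01" "2017-02"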

However, selecting the proper files is only the first step. As the date range may start or end right in the middle of a month, we also need to filter the rows of each processed file. This step is missing from the OP's code.
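
A minimal sketch of that row filter, using the two CO0010001 rows from the "CB-HZ 2017-02.txt" sample (fread would read INVOICE_DT as a date, so IDate is used here; only the 2017-02-03 row survives when the range ends on 2017-02-14):

library(data.table)

date_range <- c("2017-01-01", "2017-02-14")

# the two rows from the sample file; 2017-02-16 falls outside the range
dt <- data.table(INVOICE_DT = as.IDate(c("2017-02-03", "2017-02-16")),
                 CUST_ID    = "CO0010001",
                 `Ext Sale`  = c(-1.565, -178.2))

dt[INVOICE_DT %between% date_range]
#    INVOICE_DT   CUST_ID Ext Sale
# 1: 2017-02-03 CO0010001   -1.565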

library(data.table, warn.conflicts = FALSE)
library(magrittr)   ### MODIFIED
# library(lubridate, warn.conflicts = FALSE)   ### MODIFIED

################
## PARAMETERS ##
################

# Set path of major source folder for raw transaction data
in_directory <- "Raw Data"   ### MODIFIED

# List names of sub-folders (currently grouped by first two characters of CUST_ID)
in_subfolders <- list("AA-CA", "CB-HZ")

# Set location for output
out_directory <- "YTD Master"   ### MODIFIED
out_filename <- "OUTPUT.csv"

# Set beginning and end of date range to be collected - year-month-day format
date_range <- c("2017-01-01", "2017-02-14")   ### MODIFIED

# Enable or disable filtering of raw files to only grab items bought within certain months to save space.
# If false, all files will be scanned for unique items, which will take longer and be a larger file.
# date_filter <- TRUE   ### MODIFIED


##########
## CODE ##
##########

starttime <- Sys.time()

# create vector of filenames to be processed
in_filenames <- list.files(
  file.path(in_directory, in_subfolders), 
  pattern = "\\.txt$",
  full.names = TRUE, 
  recursive = TRUE)

# filter file names: keep only those whose year-month falls within date_range
selected_in_filenames <- 
  seq(as.Date(date_range[1]), 
      as.Date(date_range[2]), by = "1 month") %>% 
  format("%Y-%m") %>% 
  lapply(function(x) stringr::str_subset(in_filenames, x)) %>% 
  unlist()

# read and aggregate each file separately
mastertable <- rbindlist(
  lapply(selected_in_filenames, function(fn) {
    message("Processing file: ", fn)
    temptable <- fread(fn,
                       colClasses = c(CUSTOMER_TIER = "character"),
                       na.strings = "")
    # aggregate file but filtered for date_range
    temptable[INVOICE_DT %between% date_range, 
              lapply(.SD, sum), by = .(CUST_ID, QTR = quarter(INVOICE_DT)), 
              .SDcols = c("Ext Sale")]
  })
)[
  # second aggregation overall
  , lapply(.SD, sum), by = .(CUST_ID, QTR), .SDcols = c("Ext Sale")]

# Save Final table
print("Saving master table")
fwrite(mastertable, file.path(out_directory, out_filename))
# rm(mastertable)   ### MODIFIED

print(Sys.time()-starttime)

mastertable
     CUST_ID QTR Ext Sale
1: AK0010001   1  209.970
2: CO0020001   1 1540.300
3: CO0010001   1   -1.565

Note that date_range <- c("2017-01-01", "2017-02-14") now ends in the middle of February.

