List of commits:
Subject | Hash | Author | Date (UTC)
Added packlist fill script | 8f3849dc0ad5fa4ad5ce9a6732429c6e647a5187 | Sebastian | 2019-05-10 15:42:12
Included an export to the storage list from Amazon, imporved useability by adding stops | 810be612ed40559434e9ddacb904bc4a766b861d | Sebastian | 2019-05-09 10:24:36
Added a finish process and removed the order process | fc33b421234ef320aa5a61f28cfb1e5d815477ca | Sebastian | 2019-05-08 13:02:21
Added a encoding sniffing to open the files in right encoding on Windows OS machines | d0dbe5da1ca5810bfcc240a1e635ce053168b423 | Sebastian | 2019-05-06 14:54:04
Small usability improvements for the user to detect errors on different OS | 2ec13ba3c85207c799eed1cf48ab1230d245a05f | Sebastian | 2019-05-06 08:25:54
Combined with the picklist creation project | 4e5023c3c2bebc53794fe29edbcf9a7be93c826a | Sebastian | 2019-05-03 11:40:35
Created a building script for Linux based systems and made it possible to use the script from anywhere inside the filesystem | e97b391d7b1cda885c99b6b483d414c75b288b75 | Sebastian | 2019-04-15 15:00:47
Added a working Linux executable Version, together with a different workspace creation process | c84d49cf0f99fd3dcb7faabe82c125c8adc970cb | Sebastian | 2019-04-12 16:53:29
removed unnecessary files from the report and upload folder | 94af82cbd9fd7f592fb6b421986c3d900f1bf532 | Sebastian | 2019-04-05 12:51:43
Added 2 options: a new order import to plenty and a fba shipment creation | db5b9c168dabe3d6524b5674dd977044be286e0a | Sebastian | 2019-04-05 12:48:53
adjusted to another sync format and added a new export format that includes the sale price | d33a1909c4f1ff88569509ad4510a5419e323136 | Basti | 2019-03-22 10:27:22
first Version of the Sync Upload | de9ea87dff9ced84f635dd05d5ae039be870ae8a | Basti | 2019-03-19 16:32:44
First commit including Readme and folder structure | 7e77aa7abd6013ce56d6878c7004973e32011a13 | Basti | 2019-03-19 10:44:36
Commit 8f3849dc0ad5fa4ad5ce9a6732429c6e647a5187 - Added packlist fill script
Author: Sebastian
Author date (UTC): 2019-05-10 15:42
Committer name: Sebastian
Committer date (UTC): 2019-05-10 15:42
Parent(s): 810be612ed40559434e9ddacb904bc4a766b861d
Signing key:
Tree: 396701f5f758048e8b0f9d4fa078f945d748763d
File | Lines added | Lines deleted
packages/__pycache__/plenty_export.cpython-37.pyc | 0 | 0
packages/__pycache__/shipment_finish.cpython-37.pyc | 0 | 0
packages/__pycache__/syncfile.cpython-37.pyc | 0 | 0
packages/shipment_finish.py | 132 | 4
packages/syncfile.py | 1 | 1
File packages/__pycache__/plenty_export.cpython-37.pyc changed (mode: 100644) (index a930b0c..3e934f5)
File packages/__pycache__/shipment_finish.cpython-37.pyc changed (mode: 100644) (index 0240a0e..d4baaae)
File packages/__pycache__/syncfile.cpython-37.pyc changed (mode: 100644) (index a5a2b36..bf76bae)
File packages/shipment_finish.py changed (mode: 100644) (index 9602318..c247c2f)
 import tkinter
 import tkinter.filedialog
 import os
+import re
 import sys
 import csv
 import chardet
+import datetime
 from packages import plenty_export

 # Define and initialize the line seperators
 line = "#"*70
 altline = "-"*70

+# Define a string containing the day+month of the current date
+todatstr = datetime.datetime.now().strftime("%d-%m")

 def finishShipment(picklist, folder):
     # keys: barcode, packages : { package : {qty, location, packet} }
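Note: the new todatstr helper only captures day and month. A minimal sketch (the upload folder below is hypothetical, purely to illustrate the file-name pattern used further down in this commit):

    import datetime

    # Build the day-month suffix the same way the commit does ("%d-%m"),
    # e.g. "10-05" for May 10th.
    todatstr = datetime.datetime.now().strftime("%d-%m")

    # Hypothetical upload folder, only for illustration.
    upload_folder = "/home/user/project/Upload"
    print(upload_folder + "/packliste-" + todatstr + ".tsv")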
 
... ... def finishShipment(picklist, folder):
     #Reducing the storage location quantity in plentymarkets
     plenty_Data = {}
     plenty_Data = reducePlentyMarketsQty(dataset=Data, folder=folder)
+    # sort the pickdata into the packaging list from amazon
+    sortIntoPackageList(Data, folder)

     return plenty_Data

 
... ... def createInboundPlan(dataset, folder):
         else:
             break

+    uploadname = inboundplan['filepath'].replace("Report", "Upload")
+    uploadname.replace(".csv", ".tsv")
     with open(inboundplan['filepath'], mode='a', encoding=inboundplan['encoding']) as item:
         writer = csv.DictWriter(item, delimiter='\t', fieldnames=['MerchantSKU', 'Quantity'])

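Note: csv.DictWriter with delimiter='\t' appends one tab-separated row per dictionary. A minimal sketch of that append pattern (the file path and rows here are made up, not from the commit):

    import csv

    # Hypothetical inbound-plan path and rows, only to show the append pattern.
    filepath = "/tmp/inbound_plan.tsv"
    rows = [{'MerchantSKU': 'SKU-001', 'Quantity': 12},
            {'MerchantSKU': 'SKU-002', 'Quantity': 3}]

    with open(filepath, mode='a') as item:
        writer = csv.DictWriter(item, delimiter='\t', fieldnames=['MerchantSKU', 'Quantity'])
        for row in rows:
            writer.writerow(row)   # appends e.g. "SKU-001\t12"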
 
... ... def createInboundPlan(dataset, folder):

     if(os.path.isfile(inboundplan['filepath'])):
         print("{0}\n".format(line))
-        print("File was created at\n{0}.\n".format(folder))
+        print("File was created at\n{0}.\n".format(folder.replace("Report", "Upload")))
         print("Please make sure to fill out all delivery data.\n")
         print("{0}\n".format(altline))
-        print("Datei wurde erstellt in\n{0}.\n".format(folder))
+        print("Datei wurde erstellt in\n{0}.\n".format(folder.replace("Report", "Upload")))
         print("Bitte stelle sicher das alle Versandsinformationen eingetragen sind.\n")
-        print("{0}\n".format(line))
     else:
         print("{0}\n".format(line))
         print("Something went wrong..\n")
 
... ... def reducePlentyMarketsQty(dataset, folder):
             print("{0}\n".format(altline))
             print("Erstelle eine Liste des derzeitigen Bestands der Plentymarkets Lagerplätze\n")
             plenty_export.plentyExport(plentyexport=locationlist, folder=folder.replace('Report','Upload'))
-            print("{0}\n".format(line))

             Data_and_fields = {'data':Data, 'fields':column_names}
             return Data_and_fields
+    else:
+        print("{0}\n".format(line))
+        print("Skip reducing Plentymarkets storage quantity.\n")
+        print("{0}\n".format(altline))
+        print("Überspringe die Verringerung des Plentymarkets Lagerbestand.\n")
+
+def sortIntoPackageList(pickdata, folder):
+
+    print("{0}\n".format(line))
+    print("Get the packagelist-template from Amazon after you finish the inbound plan process.\n")
+    print("Be careful to enter the correct amount of packages.\n")
+    print("Save it inside the Report folder.\n")
+    print("{0}\n".format(altline))
+    print("Downloade die Packetlisten Vorlage von Amazon, am Ende des Anlieferplan Vorgangs.")
+    print("Stelle sicher das du die korrekte Kistenanzahl angegegen hast.\n")
+    print("Speichere diesen Export im Report Ordner.\n")
+    print("{0}\n".format(line))
+
+    packlist = {'filepath':'',
+                'encoding':''}
+
+    packlist['filepath'] = tkinter.filedialog.askopenfilename(initialdir=folder,
+                                                              title="Package list template from Amazon")
+
+    if(packlist['filepath']):
+        # Get encoding of the file
+        with open(packlist['filepath'], mode='rb') as item:
+            raw_data = item.read()
+            packlist['encoding'] = chardet.detect(raw_data)['encoding']
+
+        # Read the Data from the location list into a Data Dictionary
+        # define the fixed header before adding the variable ones that derive from
+        # the shipment id and the package number
+        column_names = ["Merchant SKU", "Title", "ASIN", "FNSKU",
+                        "external-id", "Condition", "Who Will Prep?",
+                        "Prep Type", "Who Will Label?", "Shipped"]
+
+        # open the picklist data to determine the highest package number
+        highest_pkg_num = 0
+        for row in pickdata:
+            for package in pickdata[row]['packages']:
+                if( pickdata[row]['packages'][package]['packet'] ):
+                    if(int( pickdata[row]['packages'][package]['packet'] ) > int(highest_pkg_num)):
+                        highest_pkg_num = pickdata[row]['packages'][package]['packet']
+
+        # Determine the Shipment ID by opening the file with csv.reader
+        with open(packlist['filepath'], mode='r', encoding=packlist['encoding']) as item:
+            reader = csv.reader(item, delimiter = '\t')

+            shipment_id = next(reader)[1]
+
+        # combine the shipment_id with the package number
+        package_headers = []
+        for number in range(int( highest_pkg_num )):
+            for field_type in ['qty', 'date']:
+                package_name = shipment_id + 'U'
+                if(number + 1 < 10):
+                    package_name = package_name + '00' + str( number + 1 )
+                else:
+                    package_name = package_name + '0' + str(number + 1)
+                if(field_type == 'qty'):
+                    package_name = package_name + ' - Unit Quantity'
+                    package_headers.append(package_name)
+                else:
+                    package_name = package_name + ' Expiration Date (dd.mm.yy)'
+                    package_headers.append(package_name)
+
+        for header in package_headers:
+            column_names.append(header)
+
+        Data = {}
+
+        with open(packlist['filepath'], mode='r', encoding=packlist['encoding']) as item:
+            reader = csv.DictReader(item, delimiter='\t', fieldnames=column_names)
+
+            for row in reader:
+                value = ''
+                values = [value for value in range( len(column_names) )]
+                for index, name in enumerate( column_names ):
+                    values[index] = row[column_names[index]]
+
+                Data[row['Merchant SKU']] = dict(zip(column_names, values))
+
+        for row in Data:
+            if(Data[row]['Merchant SKU'] and Data[row]['FNSKU'] and not( Data[row]['FNSKU'] == 'FNSKU' )):
+                for pick_row in pickdata:
+                    for package in pickdata[Data[ row ]['Merchant SKU']]['packages']:
+                        try:
+                            if(pickdata[Data[ row ]['Merchant SKU']]['packages'][package]['packet']):
+                                if(int(pickdata[Data[ row ]['Merchant SKU']]['packages'][package]['packet']) < 10):
+                                    zeroes = '00'
+                                else:
+                                    zeroes ='0'
+                            else:
+                                print("{0}\n".format(line))
+                                print( "{0} doesn't have a packet assigned to it!".format( Data[row]['Merchant SKU'] ) )
+                                print("Correct the mistake and try again.")
+                                print("{0}\n".format(altline))
+                                print("{0} wurde keine Kiste zugewiesen!".format( Data[row]['Merchant SKU'] ))
+                                print("Fehler beheben und erneut versuchen.")
+                                print("\npress ENTER to continue...")
+                                input()
+                                sys.exit()
+                        except ValueError:
+                            print("SKU: {0}, packet: {1}\n"
+                                  .format(pickdata[Data[row]['Merchant SKU']],
+                                          pickdata[Data[row]['Merchant SKU']]['packages'][package]['packet'] ))
+                        if(pickdata[Data[ row ]['Merchant SKU']]['packages'][package]['packet']):
+                            packet_column = shipment_id + 'U' + zeroes + pickdata[Data[ row ]['Merchant SKU']]['packages'][package]['packet'] + ' - Unit Quantity'
+                            Data[row][packet_column] = pickdata[Data[row]['Merchant SKU']]['packages'][package]['quantity']
+        # Write the result to a new file
+        filepath = folder.replace('Report', 'Upload') + '/packliste-' + todatstr + '.tsv'
+
+        with open(filepath, mode='w') as item:
+            writer = csv.DictWriter(item, delimiter='\t', fieldnames=column_names)
+
+            # Not writing the header so that the structure of the amazon format stays intact
+            for row in Data:
+                writer.writerow(Data[row])
+
+        # Check if the file was created
+        if(os.path.isfile(filepath)):
+            print("The file was succesfully created!\n{0}".format(filepath))
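Note: the columns appended to the Amazon template follow a fixed pattern: shipment ID, 'U', a zero-padded package number, and either the unit-quantity or expiration-date suffix. A simplified sketch of that header generation (the shipment ID and package count are invented; zfill(3) stands in for the '00'/'0' padding, which matches it for package numbers below 100):

    # Hypothetical values, for illustration only.
    shipment_id = "FBA15ABCD"
    highest_pkg_num = 3

    package_headers = []
    for number in range(int(highest_pkg_num)):
        package_name = shipment_id + 'U' + str(number + 1).zfill(3)
        package_headers.append(package_name + ' - Unit Quantity')
        package_headers.append(package_name + ' Expiration Date (dd.mm.yy)')

    print(package_headers[0])  # FBA15ABCDU001 - Unit Quantity
    print(package_headers[1])  # FBA15ABCDU001 Expiration Date (dd.mm.yy)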
File packages/syncfile.py changed (mode: 100644) (index a47770b..2cd54e9)
... ... def writeNewCsv(dataset, path, header, name):
     output_path = path + output_name

     with open(output_path, mode='a') as item:
-        writer = csv.DictWriter(item, delimiter=";", fieldnames=header)
+        writer = csv.DictWriter(item, delimiter=";", fieldnames=header, lineterminator='\n')
         writer.writeheader()
         for row in dataset:
             writer.writerow(dataset[row])
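Note: Python's csv writer emits '\r\n' by default, and a file opened in text mode on Windows translates '\n' to '\r\n' again, which can produce blank rows between records. A minimal sketch of the safer writer setup this change introduces (output path and data are hypothetical):

    import csv

    header = ['sku', 'qty']
    dataset = {'a': {'sku': 'a', 'qty': 1},
               'b': {'sku': 'b', 'qty': 2}}

    # Hypothetical output path, for illustration only.
    with open('/tmp/sync_upload.csv', mode='a') as item:
        # lineterminator='\n' stops the writer from emitting '\r\n', which text
        # mode on Windows would otherwise expand to '\r\r\n' (blank rows in Excel).
        writer = csv.DictWriter(item, delimiter=";", fieldnames=header, lineterminator='\n')
        writer.writeheader()
        for row in dataset:
            writer.writerow(dataset[row])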