initBasti / Amazon2PlentySync (public) (License: GPLv3) (since 2019-01-27) (hash sha1)
Transfer your data from your Amazon Flatfile spreadsheet over to the Plentymarkets system. A how-to is included in the readme.
List of commits:
Subject Hash Author Date (UTC)
Readme started, amazon sku upload, vari upload, images f43a9e83598c3e4623bcb08667e2b4e649b2cdea Sebastian Fricke 2019-01-22 10:44:40
Amazon SKU Upload 8586da2ae91d49c81a0d9b6ff220c8a1b1b011a6 Sebastian Fricke 2019-01-16 18:36:54
Inital Commit with current working version of the CLI Tool and the work in progress of the GUI. 207fef4277f7c169aa79eb39ec1aaaab258b888c Sebastian Fricke 2019-01-16 09:47:43
Initial commit ba965ee75fe09437fb08da5edd25b20e39e17eff Sebastian Fricke 2019-01-16 09:42:30
Commit f43a9e83598c3e4623bcb08667e2b4e649b2cdea - Readme started, amazon sku upload, vari upload, images
Added the image upload script, started writing the readme, added the amazon sku upload and changed a few functions in variation upload to remove redundant features
Author: Sebastian Fricke
Author date (UTC): 2019-01-22 10:44
Committer name: Sebastian Fricke
Committer date (UTC): 2019-01-22 10:44
Parent(s): 534cb5f7b82e8b9532b061f08d77a4e4fd4e4dbb
Signing key:
Tree: e58163f665136d9273340d51dc4a886e3076d779
File Lines added Lines deleted
.gitignore 2 0
README.md 9 0
packages/amazon_data_upload.py 25 19
packages/image_upload.py 153 0
packages/variation_upload.py 16 9
product_import.py 20 11
File .gitignore changed (mode: 100644) (index a65d046..19ce6e7)
... ... docs/_build/
56 56
57 57 # PyBuilder # PyBuilder
58 58 target/ target/
59
60 Upload/
File README.md changed (mode: 100644) (index f64078d..0946860)
1 1 AmazonToPlentyMarkets AmazonToPlentyMarkets
2
3 Guide to import your clothing products from your Amazon Flatfile to PlentyMarkets
4
5 Preparation
6
7 Check your Flatfile version; the current version of this program works with the apparel flatfile of 2019.
8
9 Make sure to prepare your PlentyMarkets account; you need the following list of data formats:
10 -
File packages/amazon_data_upload.py changed (mode: 100644) (index f636f7a..0de9797)
... ... def writeCSV(dataobject, name, columns):
15 15
16 16 output_path_number = 1 output_path_number = 1
17 17 datatype = ".csv" datatype = ".csv"
18 output_path = "Upload/" + name + "_upload_" + str(output_path_number) + datatype
18 output_path = "Upload/" + name + "_upload_" + \
19 str(output_path_number) + datatype
19 20
20 21 while(isfile(output_path)): while(isfile(output_path)):
21 22 output_path_number = int(output_path_number) + 1 output_path_number = int(output_path_number) + 1
22 output_path = "Upload/" + name + "_upload_" + str(output_path_number) + datatype
23 output_path = "Upload/" + name + "_upload_" + \
24 str(output_path_number) + datatype
23 25
24 26 with open(output_path, mode='a') as item: with open(output_path, mode='a') as item:
25 27 writer = csv.DictWriter(item, delimiter=";", fieldnames=columns) writer = csv.DictWriter(item, delimiter=";", fieldnames=columns)
 
... ... def writeCSV(dataobject, name, columns):
33 35 return output_path return output_path
34 36
35 37
36 def amazon_sku_upload(flatfile, export):
38 def amazonSkuUpload(flatfile, export):
37 39
38 column_names=['VariationID','MarketID','MarketAccountID','SKU','ParentSKU']
39 Data = SortedDict()
40 column_names = ['VariationID', 'MarketID',
41 'MarketAccountID', 'SKU', 'ParentSKU']
42 Data = SortedDict()
40 43
41 with open(export, mode='r') as item:
42 reader = csv.DictReader(item, delimiter=';')
43 item_number = 1
44 for row in reader:
45 if(row['VariationID']):
46 values=[row['VariationID'],'4','0','','']
47 Data[row['VariationNumber']]=SortedDict(zip(column_names,values))
44 with open(export, mode='r') as item:
45 reader = csv.DictReader(item, delimiter=';')
46 item_number = 1
47 for row in reader:
48 if(row['VariationID']):
49 values = [row['VariationID'], '4', '0', '', '']
50 Data[row['VariationNumber']] = SortedDict(
51 zip(column_names, values))
48 52
49 with open(flatfile, mode='r') as item:
50 reader = csv.DictReader(item, delimiter=';')
51 for row in reader:
52 if(row['item_sku'] in [*Data]):
53 Data[row['item_sku']]['SKU'] = row['item_sku']
54 Data[row['item_sku']]['ParentSKU'] = row['parent_sku']
53 with open(flatfile, mode='r') as item:
54 reader = csv.DictReader(item, delimiter=';')
55 for row in reader:
56 if(row['item_sku'] in [*Data]):
57 Data[row['item_sku']]['SKU'] = row['item_sku']
58 Data[row['item_sku']]['ParentSKU'] = row['parent_sku']
55 59
56 output_path = writeCSV(Data, 'sku_amazon', column_names)
60 output_path = writeCSV(Data, 'sku_amazon', column_names)
61
62 def amazon
File packages/image_upload.py added (mode: 100644) (index 0000000..af0de0c)
1 import csv
2 from sortedcontainers import SortedDict
3 import re
4 from os.path import isfile
5
6
def searchImage(imglink, itemid, variationid, variationlinks, target):
    """Deduplicate an image link against the entries collected in *target*.

    Parameters:
        imglink:        image URL of the current variation's picture
        itemid:         item ID the picture belongs to
        variationid:    variation ID of the current row
        variationlinks: comma-separated variation IDs gathered so far for
                        the current row's entry-in-progress
        target:         dict of already-created upload rows, each with the
                        keys 'ItemImageUrl', 'ItemImageItemID' and
                        'VariationLink' (mutated in place)

    Returns:
        (variationlinks, blockEntry) — the possibly-updated ID string and
        a flag telling the caller to skip creating a new entry because the
        image is already recorded for this variation.

    BUGFIX: the original used re.search(variationid, ...) for the
    membership tests, which treats the ID as a regex pattern and matches
    substrings — ID '12' was falsely considered present in '123'. Exact
    membership on the comma-separated list is used instead. A dead `pass`
    statement was also removed.
    """
    existing = False
    blockEntry = False

    # IDs already attached to the caller's entry-in-progress.
    known_ids = [part.strip() for part in variationlinks.split(',')] if variationlinks else []

    for key in target:
        entry = target[key]
        if entry['ItemImageUrl'] != imglink or entry['ItemImageItemID'] != itemid:
            continue

        existing = True
        if str(variationid) in known_ids:
            # Image already recorded for this variation — block a new entry.
            blockEntry = True
        else:
            # Same image used by another variation: extend its ID list
            # instead of duplicating the row.
            linked = entry['VariationLink']
            if linked and str(variationid) not in [part.strip() for part in linked.split(',')]:
                entry['VariationLink'] += ', ' + str(variationid)

    if not existing:
        # Brand-new image: start the ID list for the caller's new entry.
        variationlinks = str(variationid)

    return variationlinks, blockEntry
35
36
def imageUpload(flatfile, exportfile):
    """Build a Plentymarkets image-upload CSV from an Amazon flatfile.

    Reads variation/item IDs from *exportfile* (a ';'-delimited
    Plentymarkets export), collects up to 9 image links per SKU from
    *flatfile*, normalizes Dropbox links into direct-download form,
    deduplicates identical images across variations via searchImage()
    and writes the result to Upload/plenty_upload_<n>.csv.

    Parameters:
        flatfile:   path to the Amazon flatfile CSV (';'-delimited)
        exportfile: path to the Plentymarkets export CSV (';'-delimited)
    """
    # Map VariationNumber (== the Amazon SKU) to its Plentymarkets IDs.
    Data = {}
    with open(exportfile, mode='r') as item:
        reader = csv.DictReader(item, delimiter=';')
        names = ['variation_id', 'item_id']
        for row in reader:
            identification = row['VariationID'], row['ItemID']
            Data[row['VariationNumber']] = dict(zip(names, identification))

    # ------------------------------------------------------------------------
    # if you have a file ending that you need to remove from your Link add it
    # to the following List
    # EXAMPLE: ['?dl=0', '?dl=1']
    # ------------------------------------------------------------------------
    endings = ['?dl=0']
    replacement = ['']

    searchterm = ['www.dropbox']
    replaceterm = ['dl.dropbox']

    # Scan the flatfile for the image links + parent sku and combine them
    # with the IDs from the export file into a multidimensional dictionary.
    # NOTE(review): an earlier version skipped two lines so the header was
    # on the 3rd row; that skip was commented out — confirm the flatfile
    # really carries the header in its first row.
    count = 0
    with open(flatfile, mode='r') as item:
        links = SortedDict()
        reader = csv.DictReader(item, delimiter=';')
        names = ['Link1', 'Link2', 'Link3', 'Link4',
                 'Link5', 'Link6', 'Link7', 'Link8', 'Link9']
        for row in reader:
            imglinks = [
                row['main_image_url'],
                row['other_image_url1'],
                row['other_image_url2'],
                row['other_image_url3'],
                row['other_image_url4'],
                row['other_image_url5'],
                row['other_image_url6'],
                row['other_image_url7'],
                row['other_image_url8']
            ]

            # Normalize each link: swap the host (www.dropbox -> dl.dropbox)
            # and strip the invalid '?dl=0' ending.
            # BUGFIX: the original combined both replacements with `and`,
            # which evaluates to the second string only, silently dropping
            # the host replacement. It also located the slot via
            # imglinks.index(img), which is wrong when links repeat —
            # enumerate() fixes both.
            for idx, img in enumerate(imglinks):
                if not img:
                    continue
                for num in range(len(searchterm)):
                    if searchterm[num] in img:
                        fixed = img.replace(searchterm[num], replaceterm[num])
                        imglinks[idx] = fixed.replace(endings[num], replacement[num])
                        count += 1

            if row['item_sku'] in Data:
                links[row['item_sku']] = SortedDict(zip(names, imglinks))
                links[row['item_sku']]['itemid'] = Data[row['item_sku']]['item_id']
                links[row['item_sku']]['variationid'] = Data[row['item_sku']]['variation_id']
                # A variation without a parent references itself.
                if row['parent_sku']:
                    links[row['item_sku']]['parentsku'] = row['parent_sku']
                else:
                    links[row['item_sku']]['parentsku'] = row['item_sku']

    # Print the amount of changes made to links in order to make them work
    print("Amount of fixed invalid link endings: {0} (ending with {1}, replaced with {2})".format(
        count, ",".join(searchterm), ",".join(replaceterm)))

    # One output row per unique image link; duplicates only extend the
    # VariationLink column of the already-created entry.
    Data = SortedDict()
    number = 1
    names = ['ItemImageItemID', 'PrimaryVariationCustomNumber',
             'VariationLink', 'ItemImageUrl']
    blockEntry = False
    for row in links:
        variationlinks = ''
        for pic in [key for key in links[row] if re.match(r'Link\d$', key)]:
            if links[row][pic]:
                variationlinks, blockEntry = searchImage(
                    links[row][pic],
                    links[row]['itemid'],
                    links[row]['variationid'],
                    variationlinks,
                    Data)
                if not blockEntry and variationlinks:
                    values = [links[row]['itemid'],
                              links[row]['parentsku'],
                              variationlinks,
                              links[row][pic]]
                    # Zero-pad to two digits so IMG01..IMG09 sort before IMG10.
                    Data['IMG' + str(number).zfill(2)] = SortedDict(zip(names, values))
                    number += 1

    # Build the output path; bump the numeric suffix until the name is free.
    file_number_extension = 1
    datatype = ".csv"
    newfile_path = "Upload/plenty_upload_" + \
        str(file_number_extension) + datatype

    while isfile(newfile_path):
        file_number_extension += 1
        newfile_path = "Upload/plenty_upload_" + \
            str(file_number_extension) + datatype

    # write the Data Dictionary into a new Csv file
    with open(newfile_path, mode='a') as item:
        writer = csv.DictWriter(item, delimiter=";", fieldnames=names)
        writer.writeheader()
        for row in Data:
            writer.writerow(Data[row])

    if isfile(newfile_path):
        print("Upload File succesfully created into the Upload folder! Name: {0}"
              .format(newfile_path))
File packages/variation_upload.py changed (mode: 100644) (index cd4c2fe..8247872)
... ... def writeCSV(dataobject, name, columns):
14 14
15 15 output_path_number = 1 output_path_number = 1
16 16 datatype = ".csv" datatype = ".csv"
17 output_path = "Upload/" + name + "_upload_" + str(output_path_number) + datatype
17 output_path = "Upload/" + name + "_upload_" + \
18 str(output_path_number) + datatype
18 19
19 20 while(isfile(output_path)): while(isfile(output_path)):
20 21 output_path_number = int(output_path_number) + 1 output_path_number = int(output_path_number) + 1
21 output_path = "Upload/" + name + "_upload_" + str(output_path_number) + datatype
22 output_path = "Upload/" + name + "_upload_" + \
23 str(output_path_number) + datatype
22 24
23 25 with open(output_path, mode='a') as item: with open(output_path, mode='a') as item:
24 26 writer = DictWriter(item, delimiter=";", fieldnames=columns) writer = DictWriter(item, delimiter=";", fieldnames=columns)
 
... ... def writeCSV(dataobject, name, columns):
35 37 def variationUpload(flatfile, intern_number): def variationUpload(flatfile, intern_number):
36 38
37 39 # The column header names # The column header names
38 names = ['ItemID', 'VariationID', 'VariationNumber', 'VariationName', 'Position', 'LengthMM', 'WidthMM', 'HeightMM', 'WeightG', 'VariationAttributes', 'PurchasePrice', 'MainWarehouse', 'Availability', 'AutoStockVisible']
40 names = ['ItemID', 'VariationID', 'VariationNumber', 'VariationName', 'Position', 'LengthMM', 'WidthMM', 'HeightMM',
41 'WeightG', 'VariationAttributes', 'PurchasePrice', 'MainWarehouse', 'Availability', 'AutoStockVisible']
39 42
40 43 # create a Data Dictionary and fill it with the necessary values from the flatfile # create a Data Dictionary and fill it with the necessary values from the flatfile
41 44 Data = SortedDict() Data = SortedDict()
 
... ... def variationUpload(flatfile, intern_number):
57 60 row['package_width'] = int(float(row['package_width'])) row['package_width'] = int(float(row['package_width']))
58 61 except ValueError as err: except ValueError as err:
59 62 print(err) print(err)
60 print("/nPlease copy the values for height, length, width and weight\nfrom the children to the parent variation in the flatfile.\n")
63 print(
64 "/nPlease copy the values for height, length, width and weight\nfrom the children to the parent variation in the flatfile.\n")
61 65 exit() exit()
62 66
63 67 if(row['color_name']): if(row['color_name']):
 
... ... def variationUpload(flatfile, intern_number):
65 69 if(row['size_name']): if(row['size_name']):
66 70 attributes += ';size_name:' + row['size_name'] attributes += ';size_name:' + row['size_name']
67 71 if(row['outer_material_type']): if(row['outer_material_type']):
68 attributes += ';material_name:' + row['outer_material_type']
72 attributes += ';material_name:' + \
73 row['outer_material_type']
69 74 if('pattern' in [*row] and row['pattern']): if('pattern' in [*row] and row['pattern']):
70 75 attributes += ';pattern:' + row['pattern'] attributes += ';pattern:' + row['pattern']
71 76 try: try:
72 values = ['', '', row['item_sku'], item_name, '', int(row['package_length']) * 10, int(row['package_width']) * 10, int(row['package_height']) * 10, row['package_weight'], attributes, row['standard_price'], 'Badel', 'Y', 'Y']
77 values = ['', '', row['item_sku'], item_name, '', int(row['package_length']) * 10, int(row['package_width']) * 10, int(
78 row['package_height']) * 10, row['package_weight'], attributes, row['standard_price'], 'Badel', 'Y', 'Y']
73 79 except Exception as err: except Exception as err:
74 80 print(err) print(err)
75 81 exit() exit()
 
... ... def variationUpload(flatfile, intern_number):
82 88 # check if the sku is within the keys of the Data Dictionary # check if the sku is within the keys of the Data Dictionary
83 89 if(row['amazon_sku'] in [*Data]): if(row['amazon_sku'] in [*Data]):
84 90 Data[row['amazon_sku']]['ItemID'] = row['article_id'] Data[row['amazon_sku']]['ItemID'] = row['article_id']
85 Data[row['amazon_sku']]['VariationID'] = row['full_number']
86 91 if(not(row['position'] == 0)): if(not(row['position'] == 0)):
87 92 Data[row['amazon_sku']]['Position'] = row['position'] Data[row['amazon_sku']]['Position'] = row['position']
88 93
 
... ... def setActive(flatfile, export):
114 119
115 120 def EANUpload(flatfile, export): def EANUpload(flatfile, export):
116 121 # open the flatfile get the ean for an sku and save it into a dictionary with columnheaders of the plentymarket dataformat # open the flatfile get the ean for an sku and save it into a dictionary with columnheaders of the plentymarket dataformat
117 column_names = ['BarcodeID', 'BarcodeName', 'BarcodeType', 'Code', 'VariationID', 'VariationNumber']
122 column_names = ['BarcodeID', 'BarcodeName', 'BarcodeType',
123 'Code', 'VariationID', 'VariationNumber']
118 124 Data = {} Data = {}
119 125 with open(flatfile, mode='r') as item: with open(flatfile, mode='r') as item:
120 126 reader = DictReader(item, delimiter=";") reader = DictReader(item, delimiter=";")
121 127
122 128 for row in reader: for row in reader:
123 values = ['3', 'UPC', 'UPC', row['external_product_id'], '', row['item_sku']]
129 values = ['3', 'UPC', 'UPC',
130 row['external_product_id'], '', row['item_sku']]
124 131 Data[row['item_sku']] = dict(zip(column_names, values)) Data[row['item_sku']] = dict(zip(column_names, values))
125 132
126 133 # open the exported file to get the variation id # open the exported file to get the variation id
File product_import.py changed (mode: 100644) (index 33f0620..51b2add)
... ... from tkinter import Tk
2 2 from tkinter.filedialog import askopenfilename from tkinter.filedialog import askopenfilename
3 3 from sys import exit from sys import exit
4 4 from packages.item_upload import itemUpload from packages.item_upload import itemUpload
5 from packages.attribute_upload import attributeUpload
5 # from packages.attribute_upload import attributeUpload
6 6 from packages.variation_upload import variationUpload, setActive, EANUpload from packages.variation_upload import variationUpload, setActive, EANUpload
7 7 from packages.stock_upload import stockUpload, priceUpload from packages.stock_upload import stockUpload, priceUpload
8 8 from packages.UploadGUI import UploadGUI from packages.UploadGUI import UploadGUI
9 from packages.amazon_data_upload import amazon_sku_upload
9 from packages.amazon_data_upload import amazonSkuUpload
10 from packages.image_upload import imageUpload
10 11
11 12
12 13 def main(): def main():
 
... ... def main():
24 25 print("spreadsheet csv containing the intern numbers : ", intern_number) print("spreadsheet csv containing the intern numbers : ", intern_number)
25 26 try: try:
26 27 print("Item Upload") print("Item Upload")
27 # itempath = itemUpload(sheet, intern_number)
28 itemUpload(sheet, intern_number)
28 29 except Exception as exc: except Exception as exc:
29 30 print(exc) print(exc)
30 31 print("Item Upload failed!") print("Item Upload failed!")
31 32
32 33 try: try:
33 34 print("Variation Upload") print("Variation Upload")
34 # variationpath = variationUpload(sheet, intern_number)
35 variationUpload(sheet, intern_number)
35 36 except Exception as exc: except Exception as exc:
36 37 print(exc) print(exc)
37 38 print("VariationUpload failed!") print("VariationUpload failed!")
 
... ... def main():
47 48 print("spreadsheet csv containing the export : ", export) print("spreadsheet csv containing the export : ", export)
48 49 try: try:
49 50 print("EAN, Active & Price Upload") print("EAN, Active & Price Upload")
50 # EANUpload(sheet, export)
51 # setActive(sheet, export)
52 # priceUpload(sheet, export)
51 EANUpload(sheet, export)
52 setActive(sheet, export)
53 priceUpload(sheet, export)
53 54 except FileNotFoundError as err: except FileNotFoundError as err:
54 55 print(err) print(err)
55 56 print("Missing Data, check if you have\n - a flatfile\n - a intern file table\n - export file from plentymarkets\n - a sheet with the stock numbers!\n") print("Missing Data, check if you have\n - a flatfile\n - a intern file table\n - export file from plentymarkets\n - a sheet with the stock numbers!\n")
 
... ... def main():
57 58 print("\nOpen your amazon storage report and save it as an csv.\n") print("\nOpen your amazon storage report and save it as an csv.\n")
58 59 stocklist = askopenfilename() stocklist = askopenfilename()
59 60 print("spreadsheet csv containing the current stock : ", stocklist) print("spreadsheet csv containing the current stock : ", stocklist)
60
61 # stockUpload(sheet, export, stocklist)
61
62 stockUpload(sheet, export, stocklist)
62 63
63 64 print("\nCreate a upload file for the SKU and Parent_SKU\nto connect existing items from amazon to plentyMarkets.\n") print("\nCreate a upload file for the SKU and Parent_SKU\nto connect existing items from amazon to plentyMarkets.\n")
64 65
65 amazon_sku_upload(sheet, export)
66 amazonSkuUpload(sheet, export)
67
68 print("\nCollect the imagelinks from the flatfile, sorts them and assigns the variation ID.\n")
69 try:
70 imageUpload(sheet, export)
71 except Exception as err:
72 print(err)
73 print("Image Upload failed!")
74
66 75 # In case of new attributes uncomment and watch attribute_upload.py first # In case of new attributes uncomment and watch attribute_upload.py first
67 76 # try: # try:
68 77 # attributeUpload(sheet) # attributeUpload(sheet)
69 78 # except: # except:
70 #print("Attribute Upload failed!")
79 # print("Attribute Upload failed!")
71 80
72 81
73 82 if __name__ == '__main__': if __name__ == '__main__':
Hints:
Before first commit, do not forget to setup your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/initBasti/Amazon2PlentySync

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/initBasti/Amazon2PlentySync

Clone this repository using git:
git clone git://git.rocketgit.com/user/initBasti/Amazon2PlentySync

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main