initBasti / Amazon2PlentySync (public) (License: GPLv3) (since 2019-01-27) (hash sha1)
Transfer your data from your Amazon Flatfile spreadsheet over to the Plentymarkets system. A how-to guide is included in the readme.
List of commits:
Subject Hash Author Date (UTC)
added a filter for items with only 1 size that currently works with single parent child combinations fd3bf2b659614d5518884eb3da77b564cd0018eb Sebastian Fricke 2019-02-28 16:08:57
Market connection adjusted to AmazonFBA 1feb4b2e96c6a55ad0494696fd18fd6fb42babb0 Sebastian Fricke 2019-02-25 12:21:05
current version Feb 2019 00b24836dd378f21942ed323c2b66f928b9fb4c4 Sebastian Fricke 2019-02-25 09:00:00
Changes to fit to the new flatfile format 91cd339571f607e88f6e922f1a47630c4c8d62a7 Sebastian Fricke 2019-02-08 13:28:02
Small removal of redundant code b271de0b1be1d83be088b00a35b5618af088b58a Sebastian Fricke 2019-01-30 18:08:15
General improvements and property upload bb48084db4359210eb892a04f1322f6fda822bef Sebastian Fricke 2019-01-30 17:43:32
Fixed scripts according to dataformat changes + readme dec28d9e6ff5c5c903d5ca01a969e661d43b66c6 Sebastian Fricke 2019-01-29 21:08:04
Working Checkboxes and file import 25378c68a6220c1c6570642920e6150a50415153 Sebastian Fricke 2019-01-29 21:03:23
Added checkboxes, descriptions, import and runbutton 2021f0960e70c8c229ec08488165dc01b998a6e0 Sebastian Fricke 2019-01-27 22:19:18
Added market connection, cosmetics in product import c9a771d5e7a3a80adc650e773c568e00dd8e2aea Sebastian Fricke 2019-01-23 15:01:47
Amazon Data Upload 33dbd0ed6945c01d8917ceae3cf3964f051a2288 Sebastian Fricke 2019-01-22 14:43:39
Readme started, amazon sku upload, vari upload, images f43a9e83598c3e4623bcb08667e2b4e649b2cdea Sebastian Fricke 2019-01-22 10:44:40
Amazon SKU Upload 8586da2ae91d49c81a0d9b6ff220c8a1b1b011a6 Sebastian Fricke 2019-01-16 18:36:54
Inital Commit with current working version of the CLI Tool and the work in progress of the GUI. 207fef4277f7c169aa79eb39ec1aaaab258b888c Sebastian Fricke 2019-01-16 09:47:43
Initial commit ba965ee75fe09437fb08da5edd25b20e39e17eff Sebastian Fricke 2019-01-16 09:42:30
Commit fd3bf2b659614d5518884eb3da77b564cd0018eb - added a filter for items with only 1 size that currently works with single parent child combinations
Author: Sebastian Fricke
Author date (UTC): 2019-02-28 16:08
Committer name: Sebastian Fricke
Committer date (UTC): 2019-02-28 16:08
Parent(s): 1feb4b2e96c6a55ad0494696fd18fd6fb42babb0
Signing key:
Tree: e74b2f539612f784cf9c97850691e962c9422da5
File Lines added Lines deleted
packages/variation_upload.py 32 10
File packages/variation_upload.py changed (mode: 100644) (index e1a4191..c1b4b5a)
1 from csv import DictReader, DictWriter
1 import csv
2 2 from os.path import isfile from os.path import isfile
3 3 try: try:
4 4 from sortedcontainers import SortedDict from sortedcontainers import SortedDict
 
... ... def writeCSV(dataobject, name, columns):
23 23 str(output_path_number) + datatype str(output_path_number) + datatype
24 24
25 25 with open(output_path, mode='a') as item: with open(output_path, mode='a') as item:
26 writer = DictWriter(item, delimiter=";", fieldnames=columns)
26 writer = csv.DictWriter(item, delimiter=";", fieldnames=columns)
27 27 writer.writeheader() writer.writeheader()
28 28 for row in dataobject: for row in dataobject:
29 29 writer.writerow(dataobject[row]) writer.writerow(dataobject[row])
 
... ... def variationUpload(flatfile, intern_number):
42 42 'PurchasePrice', 'MainWarehouse', 'Availability', 'AutoStockVisible', 'PurchasePrice', 'MainWarehouse', 'Availability', 'AutoStockVisible',
43 43 'ExternalID'] 'ExternalID']
44 44
45 # get the amount of different sizes to exclude adding the size if there is only a single one as attribute.
46 number_sizes = numberOfSizes(flatfile)
47
45 48 # create a Data Dictionary and fill it with the necessary values from the flatfile # create a Data Dictionary and fill it with the necessary values from the flatfile
46 49 Data = SortedDict() Data = SortedDict()
47 50
48 51 with open(flatfile, mode='r') as item: with open(flatfile, mode='r') as item:
49 reader = DictReader(item, delimiter=";")
52 reader = csv.DictReader(item, delimiter=";")
50 53 for row in reader: for row in reader:
51 54 if(row['parent_child'] == 'parent'): if(row['parent_child'] == 'parent'):
52 55 item_name = row['item_name'] item_name = row['item_name']
 
... ... def variationUpload(flatfile, intern_number):
72 75
73 76 if(row['color_name']): if(row['color_name']):
74 77 attributes = 'color_name:' + row['color_name'] attributes = 'color_name:' + row['color_name']
75 if(row['size_name']):
78 if(row['size_name'] and number_sizes > 1):
76 79 attributes += ';size_name:' + row['size_name'] attributes += ';size_name:' + row['size_name']
77 80 try: try:
78 81 values = ['', '', row['item_sku'], item_name, '', values = ['', '', row['item_sku'], item_name, '',
 
... ... def variationUpload(flatfile, intern_number):
89 92 # open the intern numbers csv and fill in the remaining missing fields by using the # open the intern numbers csv and fill in the remaining missing fields by using the
90 93 # item_sku as dict key # item_sku as dict key
91 94 with open(intern_number, mode='r') as item: with open(intern_number, mode='r') as item:
92 reader = DictReader(item, delimiter=';')
95 reader = csv.DictReader(item, delimiter=';')
93 96 for row in reader: for row in reader:
94 97 # check if the sku is within the keys of the Data Dictionary # check if the sku is within the keys of the Data Dictionary
95 98 if(row['amazon_sku'] in [*Data]): if(row['amazon_sku'] in [*Data]):
 
... ... def setActive(flatfile, export):
110 113 Data = {} Data = {}
111 114 # open the flatfile to get the sku names # open the flatfile to get the sku names
112 115 with open(flatfile, mode='r') as item: with open(flatfile, mode='r') as item:
113 reader = DictReader(item, delimiter=';')
116 reader = csv.DictReader(item, delimiter=';')
114 117
115 118 for row in reader: for row in reader:
116 119 values = ['Y', ''] values = ['Y', '']
117 120 Data[row['item_sku']] = dict(zip(column_names, values)) Data[row['item_sku']] = dict(zip(column_names, values))
118 121
119 122 with open(export, mode='r') as item: with open(export, mode='r') as item:
120 reader = DictReader(item, delimiter=';')
123 reader = csv.DictReader(item, delimiter=';')
121 124 for row in reader: for row in reader:
122 125 if(row['VariationNumber'] in [*Data]): if(row['VariationNumber'] in [*Data]):
123 126 Data[row['VariationNumber']]['VariationID'] = row['VariationID'] Data[row['VariationNumber']]['VariationID'] = row['VariationID']
 
... ... def EANUpload(flatfile, export):
132 135 'Code', 'VariationID', 'VariationNumber'] 'Code', 'VariationID', 'VariationNumber']
133 136 Data = {} Data = {}
134 137 with open(flatfile, mode='r') as item: with open(flatfile, mode='r') as item:
135 reader = DictReader(item, delimiter=";")
138 reader = csv.DictReader(item, delimiter=";")
136 139
137 140 for row in reader: for row in reader:
138 141 values = ['3', 'UPC', 'UPC', values = ['3', 'UPC', 'UPC',
 
... ... def EANUpload(flatfile, export):
141 144
142 145 # open the exported file to get the variation id # open the exported file to get the variation id
143 146 with open(export, mode='r') as item: with open(export, mode='r') as item:
144 reader = DictReader(item, delimiter=";")
147 reader = csv.DictReader(item, delimiter=";")
145 148
146 149 for row in reader: for row in reader:
147 150 if(row['VariationNumber'] in [*Data]): if(row['VariationNumber'] in [*Data]):
 
... ... def marketConnection(export, ebay=0, amazon=0):
159 162
160 163 Data = {} Data = {}
161 164 with open(export, mode='r') as item: with open(export, mode='r') as item:
162 reader = DictReader(item, delimiter=';')
165 reader = csv.DictReader(item, delimiter=';')
163 166
164 167 for row in reader: for row in reader:
165 168 if row['VariationID'] and row['VariationNumber']: if row['VariationID'] and row['VariationNumber']:
 
... ... def marketConnection(export, ebay=0, amazon=0):
169 172
170 173
171 174 output_path = writeCSV(Data, 'market_connect', column_names) output_path = writeCSV(Data, 'market_connect', column_names)
175
def numberOfSizes(flatfile):
    """Return the number of distinct non-empty sizes in the flatfile.

    Reads the semicolon-delimited flatfile CSV, collects every value of
    the 'size_name' column into a set, and discards the empty string
    (rows without a size, e.g. parent rows) before counting.

    Callers (see variationUpload) use this to skip adding 'size_name' as
    an attribute when only a single size exists.

    Args:
        flatfile: path to the semicolon-delimited Amazon flatfile CSV;
                  assumed to contain a 'size_name' column — a missing
                  column would raise KeyError (TODO confirm upstream
                  validation).

    Returns:
        int: count of distinct non-empty size names.
    """
    sizeset = set()

    with open(flatfile, mode='r') as item:
        reader = csv.DictReader(item, delimiter=';')
        for row in reader:
            sizeset.add(row['size_name'])

    # Rows without a size contribute '', which is not a real size.
    sizeset.discard('')

    # FIX: the original print used invalid '%0'/'%1' placeholders and a
    # typo ("lenght"); it printed the raw format string followed by the
    # values. Kept as a debug trace, now correctly formatted.
    # (Also removed the unused 'length_set' variable.)
    print(f"length of set {len(sizeset)}, content of set {sizeset}")

    return len(sizeset)
Hints:
Before first commit, do not forget to setup your git environment:
git config --global user.name "your_name_here"
git config --global user.email "your@email_here"

Clone this repository using HTTP(S):
git clone https://rocketgit.com/user/initBasti/Amazon2PlentySync

Clone this repository using ssh (do not forget to upload a key first):
git clone ssh://rocketgit@ssh.rocketgit.com/user/initBasti/Amazon2PlentySync

Clone this repository using git:
git clone git://git.rocketgit.com/user/initBasti/Amazon2PlentySync

You are allowed to anonymously push to this repository.
This means that your pushed commits will automatically be transformed into a merge request:
... clone the repository ...
... make some changes and some commits ...
git push origin main