1 | /* |
2 | * Copyright (c) 2017, Matias Fontanini |
3 | * All rights reserved. |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions are |
7 | * met: |
8 | * |
9 | * * Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * * Redistributions in binary form must reproduce the above |
12 | * copyright notice, this list of conditions and the following disclaimer |
13 | * in the documentation and/or other materials provided with the |
14 | * distribution. |
15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
20 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | * |
28 | */ |
29 | |
30 | #include <errno.h> |
31 | #include <memory> |
32 | #include "producer.h" |
33 | #include "exceptions.h" |
34 | #include "message_internal.h" |
35 | |
36 | using std::move; |
37 | using std::string; |
38 | using std::chrono::milliseconds; |
39 | using std::unique_ptr; |
40 | using std::get; |
41 | |
42 | namespace cppkafka { |
43 | |
44 | Producer::Producer(Configuration config) |
45 | : KafkaHandleBase(move(config)), message_payload_policy_(PayloadPolicy::COPY_PAYLOAD) { |
46 | char error_buffer[512]; |
47 | auto config_handle = get_configuration().get_handle(); |
48 | rd_kafka_conf_set_opaque(config_handle, this); |
49 | rd_kafka_t* ptr = rd_kafka_new(RD_KAFKA_PRODUCER, |
50 | rd_kafka_conf_dup(config_handle), |
51 | error_buffer, sizeof(error_buffer)); |
52 | if (!ptr) { |
53 | throw Exception("Failed to create producer handle: " + string(error_buffer)); |
54 | } |
55 | set_handle(ptr); |
56 | } |
57 | |
// Sets the payload handling policy (e.g. copy vs. pass-through); the value
// is forwarded to rdkafka as the message flags on every subsequent produce.
void Producer::set_payload_policy(PayloadPolicy policy) {
    message_payload_policy_ = policy;
}
61 | |
// Returns the payload handling policy currently in effect for this producer.
Producer::PayloadPolicy Producer::get_payload_policy() const {
    return message_payload_policy_;
}
65 | |
66 | #if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION) |
67 | |
68 | void Producer::produce(const MessageBuilder& builder) { |
69 | do_produce(builder, MessageBuilder::HeaderListType(builder.header_list())); //copy headers |
70 | } |
71 | |
72 | void Producer::produce(MessageBuilder&& builder) { |
73 | do_produce(builder, std::move(builder.header_list())); //move headers |
74 | } |
75 | |
76 | void Producer::produce(const Message& message) { |
77 | do_produce(message, HeaderList<Message::HeaderType>(message.get_header_list())); //copy headers |
78 | } |
79 | |
80 | void Producer::produce(Message&& message) { |
81 | do_produce(message, message.detach_header_list<Message::HeaderType>()); //move headers |
82 | } |
83 | |
84 | #else |
85 | |
// Produces the message described by the builder (this rdkafka version
// predates header support, so no header list is involved).
void Producer::produce(const MessageBuilder& builder) {
    do_produce(builder);
}
89 | |
// Rvalue-builder overload; without header support there is nothing to move,
// so this simply forwards to the common implementation.
void Producer::produce(MessageBuilder&& builder) {
    do_produce(builder);
}
93 | |
// Re-produces an existing message (no header support in this rdkafka version).
void Producer::produce(const Message& message) {
    do_produce(message);
}
97 | |
// Rvalue-message overload; without header support there is nothing to move,
// so this simply forwards to the common implementation.
void Producer::produce(Message&& message) {
    do_produce(message);
}
101 | |
102 | #endif |
103 | |
// Polls for producer events (delivery reports, callbacks) using the default
// timeout configured on this handle. Returns the number of events served.
int Producer::poll() {
    return poll(get_timeout());
}
107 | |
108 | int Producer::poll(milliseconds timeout) { |
109 | return rd_kafka_poll(get_handle(), static_cast<int>(timeout.count())); |
110 | } |
111 | |
// Flushes all outstanding produce requests using the default timeout
// configured on this handle.
void Producer::flush() {
    flush(get_timeout());
}
115 | |
116 | void Producer::flush(milliseconds timeout) { |
117 | auto result = rd_kafka_flush(get_handle(), static_cast<int>(timeout.count())); |
118 | check_error(result); |
119 | } |
120 | |
121 | #if (RD_KAFKA_VERSION >= RD_KAFKA_HEADERS_SUPPORT_VERSION) |
122 | |
123 | void Producer::(const MessageBuilder& builder, |
124 | MessageBuilder::HeaderListType&& ) { |
125 | const Buffer& payload = builder.payload(); |
126 | const Buffer& key = builder.key(); |
127 | const int policy = static_cast<int>(message_payload_policy_); |
128 | auto result = rd_kafka_producev(get_handle(), |
129 | RD_KAFKA_V_TOPIC(builder.topic().data()), |
130 | RD_KAFKA_V_PARTITION(builder.partition()), |
131 | RD_KAFKA_V_MSGFLAGS(policy), |
132 | RD_KAFKA_V_TIMESTAMP(builder.timestamp().count()), |
133 | RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()), |
134 | RD_KAFKA_V_HEADERS(headers.release_handle()), //pass ownership to rdkafka |
135 | RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()), |
136 | RD_KAFKA_V_OPAQUE(builder.user_data()), |
137 | RD_KAFKA_V_END); |
138 | check_error(result); |
139 | } |
140 | |
141 | void Producer::(const Message& message, |
142 | MessageBuilder::HeaderListType&& ) { |
143 | const Buffer& payload = message.get_payload(); |
144 | const Buffer& key = message.get_key(); |
145 | const int policy = static_cast<int>(message_payload_policy_); |
146 | int64_t duration = message.get_timestamp() ? message.get_timestamp().get().get_timestamp().count() : 0; |
147 | auto result = rd_kafka_producev(get_handle(), |
148 | RD_KAFKA_V_TOPIC(message.get_topic().data()), |
149 | RD_KAFKA_V_PARTITION(message.get_partition()), |
150 | RD_KAFKA_V_MSGFLAGS(policy), |
151 | RD_KAFKA_V_TIMESTAMP(duration), |
152 | RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()), |
153 | RD_KAFKA_V_HEADERS(headers.release_handle()), //pass ownership to rdkafka |
154 | RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()), |
155 | RD_KAFKA_V_OPAQUE(message.get_user_data()), |
156 | RD_KAFKA_V_END); |
157 | check_error(result); |
158 | } |
159 | |
160 | #else |
161 | |
162 | void Producer::do_produce(const MessageBuilder& builder) { |
163 | const Buffer& payload = builder.payload(); |
164 | const Buffer& key = builder.key(); |
165 | const int policy = static_cast<int>(message_payload_policy_); |
166 | auto result = rd_kafka_producev(get_handle(), |
167 | RD_KAFKA_V_TOPIC(builder.topic().data()), |
168 | RD_KAFKA_V_PARTITION(builder.partition()), |
169 | RD_KAFKA_V_MSGFLAGS(policy), |
170 | RD_KAFKA_V_TIMESTAMP(builder.timestamp().count()), |
171 | RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()), |
172 | RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()), |
173 | RD_KAFKA_V_OPAQUE(builder.user_data()), |
174 | RD_KAFKA_V_END); |
175 | check_error(result); |
176 | } |
177 | |
178 | void Producer::do_produce(const Message& message) { |
179 | const Buffer& payload = message.get_payload(); |
180 | const Buffer& key = message.get_key(); |
181 | const int policy = static_cast<int>(message_payload_policy_); |
182 | int64_t duration = message.get_timestamp() ? message.get_timestamp().get().get_timestamp().count() : 0; |
183 | auto result = rd_kafka_producev(get_handle(), |
184 | RD_KAFKA_V_TOPIC(message.get_topic().data()), |
185 | RD_KAFKA_V_PARTITION(message.get_partition()), |
186 | RD_KAFKA_V_MSGFLAGS(policy), |
187 | RD_KAFKA_V_TIMESTAMP(duration), |
188 | RD_KAFKA_V_KEY((void*)key.get_data(), key.get_size()), |
189 | RD_KAFKA_V_VALUE((void*)payload.get_data(), payload.get_size()), |
190 | RD_KAFKA_V_OPAQUE(message.get_user_data()), |
191 | RD_KAFKA_V_END); |
192 | check_error(result); |
193 | } |
194 | |
195 | #endif //RD_KAFKA_HEADERS_SUPPORT_VERSION |
196 | |
197 | } // cppkafka |
198 | |