// IPC transport implementation that uses shared memory.
// This is the server side.
//
// The server side has knowledge about the layout of the shared memory
// and the state transitions. Both are explained in sharedmem_ipc_client.h.
// In other words, the server knows exactly how the shared memory is laid out.
//
// As opposed to SharedMemIPCClient, the Server object should be one for the
// entire lifetime of the target process. The server is in charge of creating
// the events (ping, pong) both for the client and for the target that are used
// to signal the IPC and also in charge of setting the initial state of the
// channels.
// Unlike the client, there is only one server object for the whole lifetime of
// the target process. It creates the events used to signal the IPC for both
// the client and the target, and it also sets up the initial state of the
// channels.
//
// When an IPC is ready, the server relies on being called by the
// ThreadPingEventReady callback. The IPC server then retrieves the buffer,
// marshals it into a CrossCallParam object and calls the Dispatcher, who is in
// charge of fulfilling the IPC request.
// That is, when an IPC arrives the server reacts in the ThreadPingEventReady
// callback: it unpacks the buffer into a CrossCallParam object and calls the
// Dispatcher to service the IPC request.
// the shared memory implementation of the IPC server. There should be one
// of these objects per target (IPC client) process.
class SharedMemIPCServer {
 public:
  // Creates the IPC server.
  // target_process: handle to the target process. It must be suspended. It is
  // unfortunate to receive a raw handle (and store it inside this object) as
  // that dilutes ownership of the process, but in practice a SharedMemIPCServer
  // is owned by TargetProcess, which calls this method, and owns the handle, so
  // everything is safe. If that changes, we should break this dependency and
  // duplicate the handle instead.
  // target_process_id: process id of the target process.
  // thread_provider: a thread provider object.
  // dispatcher: an object that can service IPC calls.
  // The constructor is key: this is where the data structures get wired
  // together. One important clue from the description above is that a
  // SharedMemIPCServer is owned by TargetProcess.
  SharedMemIPCServer(HANDLE target_process,           // target process handle and id
                     DWORD target_process_id,
                     ThreadProvider* thread_provider,  // the thread pool
                     Dispatcher* dispatcher);          // the dispatcher
~SharedMemIPCServer();
  // Initializes the server structures, shared memory structures and
  // creates the kernel events used to signal the IPC.
  // The usual Init() step: it sets the shared memory size and carves the
  // memory into channels.
  bool Init(void* shared_mem, uint32_t shared_size, uint32_t channel_size);
 private:
  // Allow tests to be marked DISABLED_. Note that FLAKY_ and FAILS_ prefixes
  // do not work with sandbox tests.
  FRIEND_TEST_ALL_PREFIXES(IPCTest, SharedMemServerTests);

  // When an event fires (IPC request). A thread from the ThreadProvider
  // will call this function. The context parameter should be the same as
  // provided when ThreadProvider::RegisterWait was called.
  // When an IPC request arrives, a thread produced by the ThreadProvider first
  // calls this static function. The context is the one that was passed in when
  // ThreadProvider::RegisterWait was called.
  static void __stdcall ThreadPingEventReady(void* context, unsigned char);
  // Makes the client and server events. This function is called once
  // per channel.
  // Creates the ping/pong event pair used for the back-and-forth between client
  // and server. Because this is shared-memory IPC, the side that fills the
  // buffer signals the ping event ("the request is ready for you"), and the
  // receiver signals the pong event when it is done ("the result is filled in,
  // come and get it"). The ping-pong naming is quite fitting.
  bool MakeEvents(base::win::ScopedHandle* server_ping,
                  base::win::ScopedHandle* server_pong,
                  HANDLE* client_ping,
                  HANDLE* client_pong);
  // A copy of this structure is maintained per channel.
  // Note that a lot of the fields are just the same as what we have in the IPC
  // object itself. It is better to have the copies since we can dispatch in the
  // static method without worrying about converting back to a member function
  // call or about threading issues.
  // An internal struct that pulls out some of the IPC object's own members so
  // they can be used for dispatching inside the static method.
  struct ServerControl {
    ServerControl();
    ~ServerControl();
    // This channel server ping event.
    base::win::ScopedHandle ping_event;
    // This channel server pong event.
    base::win::ScopedHandle pong_event;
    // The size of this channel.
    uint32_t channel_size;
    // The pointer to the actual channel data.
    char* channel_buffer;
    // The pointer to the base of the shared memory.
    char* shared_base;
    // A pointer to this channel's client-side control structure; this structure
    // lives in the shared memory.
    ChannelControl* channel;
    // the IPC dispatcher associated with this channel.
    Dispatcher* dispatcher;
    // The target process information associated with this channel.
    ClientInfo target_info;
  };
  // Looks for the appropriate handler for this IPC and invokes it.
  // The static method that looks up the handler for this IPC and invokes it.
  static bool InvokeCallback(const ServerControl* service_context,
                             void* ipc_buffer,
                             CrossCallReturn* call_result);
  // Points to the shared memory channel control which lives at
  // the start of the shared section.
  IPCControl* client_control_;  // the structure at the start of the shared memory; it governs how the channels are carved up
  // Keeps track of the server side objects that are used to answer an IPC.
  // The ServerControl pointers are chained here (as smart pointers) and are
  // used when answering an IPC.
  std::list<std::unique_ptr<ServerControl>> server_contexts_;
  // The thread provider provides the threads that call back into this object
  // when the IPC events fire.
  // thread_provider_ is the thread pool responsible for supplying the thread
  // that invokes the callback when an IPC event fires.
  ThreadProvider* thread_provider_;
  // The IPC object is associated with a target process.
  // Handle of the target process this IPC object is associated with.
  HANDLE target_process_;
  // The target process id associated with the IPC object.
  // The id of that same target process.
  DWORD target_process_id_;
// Clearly neither thread_provider nor dispatcher is owned by this object;
// SharedMemIPCServer merely drives them.
SharedMemIPCServer::SharedMemIPCServer(HANDLE target_process,
                                       DWORD target_process_id,
                                       ThreadProvider* thread_provider,
                                       Dispatcher* dispatcher)
    : client_control_(nullptr),
      thread_provider_(thread_provider),
      target_process_(target_process),
      target_process_id_(target_process_id),
      call_dispatcher_(dispatcher) {
  // We create an initially owned mutex. If the server dies unexpectedly,
  // the thread that owns it will fail to release the lock and windows will
  // report to the target (when it tries to acquire it) that the wait was
  // abandoned. Note: We purposely leak the local handle because we want it to
  // be closed by Windows itself so it is properly marked as abandoned if the
  // server dies.
  if (!g_alive_mutex) {
    HANDLE mutex = ::CreateMutexW(nullptr, true, nullptr);
    // Textbook handling of the creation race.
    if (::InterlockedCompareExchangePointer(&g_alive_mutex, mutex, nullptr)) {
      // We lost the race to create the mutex.
      ::CloseHandle(mutex);
    }
  }
}
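The deliberately leaked, initially owned mutex works as a liveness beacon: a healthy broker holds it forever, so a bounded wait on the duplicated server_alive handle should always time out, and any other outcome (notably WAIT_ABANDONED after the owner dies) means the broker is gone. A minimal sketch of that check under those assumptions; the helper name is mine, not part of the sandbox code:

#include <windows.h>

// A healthy broker never releases server_alive, so a bounded wait on it
// should come back WAIT_TIMEOUT. Anything else (WAIT_ABANDONED after a
// broker crash, or WAIT_OBJECT_0 / WAIT_FAILED for a broken handle) is read
// as "the server side is gone", which is how the client code below treats it.
bool ServerLooksDead(HANDLE server_alive, DWORD timeout_ms) {
  return ::WaitForSingleObject(server_alive, timeout_ms) != WAIT_TIMEOUT;
}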
SharedMemIPCServer::~SharedMemIPCServer() {
  // Free the wait handles associated with the thread pool.
  // We do not care about the thread_provider implementation for now; knowing
  // its role is enough.
  if (!thread_provider_->UnRegisterWaits(this)) {
    // Better to leak than to crash.
    return;
  }
  server_contexts_.clear();
  if (client_control_)
    ::UnmapViewOfFile(client_control_);  // evidently client_control_ is the mapped shared memory itself, the structure that begins with IPCControl
}
bool SharedMemIPCServer::Init(void* shared_mem,
                              uint32_t shared_size,
                              uint32_t channel_size) {
  // The shared memory needs to be at least as big as a channel.
  // The shared memory has to hold at least one channel.
  if (shared_size < channel_size) {
    return false;
  }
  // The channel size should be aligned.
  // The channel size must be a multiple of 32 bytes.
  if (0 != (channel_size % 32)) {
    return false;
  }
  // Calculate how many channels we can fit in the shared memory.
  // Work out how many channels the shared memory can hold (each channel
  // carries one IPC call at a time).
  // The computation follows the IPCControl structure: channels is the trailing
  // flexible ChannelControl array member of IPCControl, so everything left
  // after the other IPCControl members carries the ChannelControl entries.
  // IPCControl and ChannelControl are analyzed below. In short, IPCControl
  // says how many ChannelControl entries there are, and each ChannelControl in
  // turn records the offset of its real channel buffer. This mirrors the
  // design of crosscall.
  shared_size -= offsetof(IPCControl, channels);
  size_t channel_count = shared_size / (sizeof(ChannelControl) + channel_size);
  // If we cannot fit even one channel we bail out.
  // Not even one channel fits? Give up.
  if (0 == channel_count) {
    return false;
  }
  // Calculate the start of the first channel.
  // Compute where the first real channel buffer starts.
  size_t base_start =
      (sizeof(ChannelControl) * channel_count) + offsetof(IPCControl, channels);
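To make the arithmetic above concrete, here is a small standalone sketch; the struct and function names are stand-ins of mine, shaped roughly like the real IPCControl/ChannelControl, and the sample sizes are the two-page section and 1 KB channels mentioned elsewhere in this post:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-ins shaped like the real control structures:
//   [ FakeIPCControl header | N x FakeChannelControl | N x channel buffers ]
struct FakeChannelControl {
  size_t channel_base;
  long state;
  void* ping_event;
  void* pong_event;
  uint32_t ipc_tag;
};
struct FakeIPCControl {
  size_t channels_count;
  void* server_alive;
  FakeChannelControl channels[1];
};

void DescribeLayout(uint32_t shared_size, uint32_t channel_size) {
  // Same math as SharedMemIPCServer::Init(): whatever is left after the
  // IPCControl header is split into (control entry + buffer) pairs.
  size_t usable = shared_size - offsetof(FakeIPCControl, channels);
  size_t channel_count = usable / (sizeof(FakeChannelControl) + channel_size);
  size_t base_start = sizeof(FakeChannelControl) * channel_count +
                      offsetof(FakeIPCControl, channels);
  printf("%zu channels, first buffer at offset %zu\n", channel_count, base_start);
  for (size_t i = 0; i < channel_count; ++i)
    printf("  channel %zu: buffer offset %zu\n", i, base_start + i * channel_size);
}

int main() {
  DescribeLayout(2 * 4096, 1024);  // e.g. a two-page section with 1 KB channels
}

Each additional channel costs sizeof(ChannelControl) + channel_size bytes of the section, which is exactly the divisor used above.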
  // This is the initialization that we do per-channel. Basically:
  // 1) make two events (ping & pong)
  // 2) create handles to the events for the client and the server.
  // 3) initialize the channel (client_context) with the state.
  // 4) initialize the server side of the channel (service_context).
  // 5) call the thread provider RegisterWait to register the ping events.
  // Every channel gets its own pair of events (ping & pong).
  // The client keeps its event handles in client_context, i.e. inside
  // shared_mem; the server keeps its handles in a newly allocated
  // ServerControl, and each ServerControl is pushed into server_contexts_.
  for (size_t ix = 0; ix != channel_count; ++ix) {
    ChannelControl* client_context = &client_control_->channels[ix];
    ServerControl* service_context = new ServerControl;
    server_contexts_.push_back(base::WrapUnique(service_context));  // stored as a smart pointer
    if (!MakeEvents(&service_context->ping_event, &service_context->pong_event,
                    &client_context->ping_event, &client_context->pong_event)) {
      return false;
    }
    // Note that some of these values are available as members of this object
    // but we put them again into the service_context because we will be called
    // on a static method (ThreadPingEventReady). In particular, target_process_
    // is a raw handle that is not owned by this object (it's owned by the
    // owner of this object), and we are storing it in multiple places.
    // Everything is copied into service_context so it is readily available in
    // the static method ThreadPingEventReady.
    service_context->shared_base = reinterpret_cast<char*>(shared_mem);
    service_context->channel_size = channel_size;
    service_context->channel = client_context;
    service_context->channel_buffer =
        service_context->shared_base + client_context->channel_base;
    service_context->dispatcher = call_dispatcher_;
    service_context->target_info.process = target_process_;
    service_context->target_info.process_id = target_process_id_;
    // Advance to the next channel.
    base_start += channel_size;
    // Register the ping event with the threadpool.
    // Through the thread_provider interface the ping event is bound to
    // ThreadPingEventReady, with service_context as the argument that gets
    // passed to ThreadPingEventReady when the ping event is signaled.
    thread_provider_->RegisterWait(this, service_context->ping_event.Get(),
                                   ThreadPingEventReady, service_context);
  }
  if (!::DuplicateHandle(::GetCurrentProcess(), g_alive_mutex, target_process_,
                         &client_control_->server_alive,
                         SYNCHRONIZE | EVENT_MODIFY_STATE, false, 0)) {
    return false;
  }
  // This last setting indicates to the client all is setup.
  // At this point every channel has been wired up.
  client_control_->channels_count = channel_count;
  return true;
}
// This function gets called by a thread from the thread pool when a
// ping event fires. The context is the same as passed in the RegisterWait()
// call above.
// The client signals the service_context->ping_event that was bound via
// thread_provider_->RegisterWait; Windows then calls back ThreadPingEventReady,
// and context is in fact that service_context, i.e. a ServerControl object.
void __stdcall SharedMemIPCServer::ThreadPingEventReady(void* context,
                                                        unsigned char) {
  if (!context) {
    DCHECK(false);
    return;
  }
  ServerControl* service_context = reinterpret_cast<ServerControl*>(context);  // pick up our context
  // Since the event fired, the channel *must* be busy. Change to kAckChannel
  // while we service it.
  // The state change must be atomic: if the current state is kBusyChannel
  // (which the client should have set), switch it to kAckChannel.
  LONG last_state = ::InterlockedCompareExchange(
      &service_context->channel->state, kAckChannel, kBusyChannel);
  // If the previous state was not kBusyChannel, something is wrong with this call.
  if (kBusyChannel != last_state) {
    DCHECK(false);
    return;
  }
  // Prepare the result structure. At this point we will return some result
  // even if the IPC is invalid, malformed or has no handler.
  // This is where the CrossCallReturn gets constructed.
  CrossCallReturn call_result = {0};
  // Tracing it back, buffer is really one channel inside the shared memory:
  // each IPC call uses one of the many channels the shared memory is split
  // into, and channel_buffer is the start of that channel's buffer.
  void* buffer = service_context->channel_buffer;
  // Copy the answer back into the channel and signal the pong event. This
  // should wake up the client so it can finish the ipc cycle.
  // Copy the answer, a CrossCallReturn object, back into the buffer shared with
  // the client (call_params is an ActualCallParams, which embeds a
  // CrossCallReturn), switch the channel state to kAckChannel (set again here
  // because InvokeCallback may have changed it), and finally signal pong.
  CrossCallParams* call_params = reinterpret_cast<CrossCallParams*>(buffer);
  memcpy(call_params->GetCallReturn(), &call_result, sizeof(call_result));
  ::InterlockedExchange(&service_context->channel->state, kAckChannel);
  ::SetEvent(service_context->pong_event.Get());
}
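Stripped of the Chromium-specific types, the callback boils down to a small, fixed sequence. A rough sketch of that skeleton, assuming simplified stand-ins for ChannelState, ChannelControl and CrossCallReturn; HandleIpc stands in for the InvokeCallback step:

#include <windows.h>
#include <cstring>

// Illustrative stand-ins; the enum values and struct shapes are simplified.
enum : LONG { kFreeChannel = 1, kBusyChannel, kAckChannel, kAbandonedChannel };

struct MiniReturn { LONG call_outcome; };  // stands in for CrossCallReturn

struct MiniChannel {
  volatile LONG state;
  HANDLE pong_event;
  char* buffer;           // start of this channel's data in the shared section
  size_t return_offset;   // where the CrossCallReturn-sized answer lives in the buffer
};

// Stand-in for InvokeCallback(): parse the request, run the handler, fill the result.
static bool HandleIpc(void* /*buffer*/, MiniReturn* result) {
  result->call_outcome = 0;
  return true;
}

void OnPing(MiniChannel* ch) {
  // The ping fired, so the client must have left the channel busy.
  if (::InterlockedCompareExchange(&ch->state, kAckChannel, kBusyChannel) !=
      kBusyChannel)
    return;  // unexpected state; the real code DCHECKs and bails out

  MiniReturn result = {};
  HandleIpc(ch->buffer, &result);  // the real code dispatches on a *copy* of the message

  // Copy the answer back into the channel and wake the waiting client.
  memcpy(ch->buffer + ch->return_offset, &result, sizeof(result));
  ::InterlockedExchange(&ch->state, kAckChannel);
  ::SetEvent(ch->pong_event);
}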
bool SharedMemIPCServer::InvokeCallback(const ServerControl* service_context,
                                        void* ipc_buffer,
                                        CrossCallReturn* call_result) {
  // Set the default error code;
  SetCallError(SBOX_ERROR_INVALID_IPC, call_result);  // this sets call_result->call_outcome
  uint32_t output_size = 0;
  // Parse, verify and copy the message. The handler operates on a copy
  // of the message so the client cannot play dirty tricks by changing the
  // data in the channel while the IPC is being processed.
  // The server uses a CrossCallParamsEx to carry the data from the channel
  // buffer. output_size returns how much of the channel buffer actually holds
  // valid data (the payload usually does not fill the whole channel buffer).
  std::unique_ptr<CrossCallParamsEx> params(CrossCallParamsEx::CreateFromBuffer(
      ipc_buffer, service_context->channel_size, &output_size));
  if (!params.get())
    return false;
// IPCParams is what OnMessageReady uses to decide whether a given dispatcher
// can handle this kind of IPC. A dispatcher pre-registers a number of
// IPCParams, and an incoming IPC call uses its own IPCParams for the lookup.
//
// Releases memory allocated for IPC arguments, if needed.
void ReleaseArgs(const IPCParams* ipc_params, void* args[kMaxIpcParams]) {
  for (size_t i = 0; i < kMaxIpcParams; i++) {
    switch (ipc_params->args[i]) {
      // Only two argument kinds own resources that must be deleted:
      // strings and INOUTPTR buffers.
      case WCHAR_TYPE: {
        delete reinterpret_cast<base::string16*>(args[i]);
        args[i] = nullptr;
        break;
      }
      case INOUTPTR_TYPE: {
        delete reinterpret_cast<CountedBuffer*>(args[i]);
        args[i] = nullptr;
        break;
      }
      default:
        break;
    }
  }
}
// Fills up the list of arguments (args and ipc_params) for an IPC call.
bool GetArgs(CrossCallParamsEx* params,
             IPCParams* ipc_params,
             void* args[kMaxIpcParams]) {
  if (kMaxIpcParams < params->GetParamsCount())
    return false;
bool SharedMemIPCServer::MakeEvents(base::win::ScopedHandle* server_ping,
                                    base::win::ScopedHandle* server_pong,
                                    HANDLE* client_ping,
                                    HANDLE* client_pong) {
  // Note that the IPC client has no right to delete the events. That would
  // cause problems. The server *owns* the events.
  // The server owns the events; the client has no right to delete them.
  const DWORD kDesiredAccess = SYNCHRONIZE | EVENT_MODIFY_STATE;
  // The events are auto reset, and start not signaled.
  // In essence this is a thin wrapper around CreateEventW. Since the client and
  // the server each hold a handle, DuplicateHandle is used to hand the target
  // process, i.e. the client side, its own handle to the event.
  server_ping->Set(::CreateEventW(nullptr, false, false, nullptr));
  if (!::DuplicateHandle(::GetCurrentProcess(), server_ping->Get(),
                         target_process_, client_ping, kDesiredAccess, false,
                         0)) {
    return false;
  }
// IPC transport implementation that uses shared memory.
// This is the client side.
//
// The shared memory is divided into blocks called channels, and potentially
// it can perform as many concurrent IPC calls as channels. The IPC over
// each channel is strictly synchronous for the client.
// Splitting the shared memory into multiple channels is what allows IPC calls
// to be handled concurrently.
//
// Each channel has a channel control section associated with it. Each control
// section has two kernel events (known as ping and pong) and an integer
// variable that maintains a state.
//
// this is the state diagram of a channel:
//
//                    locked                 in service
//   kFreeChannel----------->kBusyChannel-------------->kAckChannel
//        ^                                                  |
//        |__________________________________________________|
//                          answer ready
//
// The protocol is as follows:
//   1) client finds a free channel: state = kFreeChannel
//   2) does an atomic compare-and-swap, now state = kBusyChannel
//   3) client writes the data into the channel buffer
//   4) client signals the ping event and waits (blocks) on the pong event
//   5) eventually the server signals the pong event
//   6) the client awakes and reads the answer from the same channel
//   7) the client updates its InOut parameters with the new data from the
//      shared memory section.
//   8) the client atomically sets the state = kFreeChannel
//
// In the shared memory the layout is as follows:
//
//   [ channel count     ]
//   [ channel control 0 ]
//   [ channel control 1 ]
//   [ channel control N ]
//   [ channel buffer 0  ] 1024 bytes
//   [ channel buffer 1  ] 1024 bytes
//   [ channel buffer N  ] 1024 bytes
//
// By default each channel buffer is 1024 bytes
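The whole state machine is driven by interlocked compare-and-swaps on the per-channel state field. A tiny sketch of the edges in the diagram above; the numeric state values here are illustrative, the real enum lives in the sandbox headers:

#include <windows.h>

// Channel states from the diagram above (numeric values are illustrative).
enum : LONG { kFreeChannel = 1, kBusyChannel, kAckChannel, kAbandonedChannel };

// Performs one edge of the state diagram atomically; returns true if this
// thread is the one that made the transition. The client "locks" a channel
// with TryTransition(&state, kFreeChannel, kBusyChannel) and, after reading
// the answer, releases it by moving kAckChannel back to kFreeChannel.
bool TryTransition(volatile LONG* state, LONG from, LONG to) {
  return ::InterlockedCompareExchange(state, to, from) == from;
}

Note that the real FreeBuffer() shown later simply InterlockedExchange()s the state back to kFreeChannel; the compare-and-swap form here just makes the diagram's edges explicit.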
// First-level structure, at the very start of the shared memory.
struct IPCControl {
  // total number of channels available, some might be busy at a given time
  size_t channels_count;
  // handle to a shared mutex to detect when the server is dead
  HANDLE server_alive;
  // array of channel control structures
  ChannelControl channels[1];
};
// Second-level structure. channels_count in the first level determines how
// many of these there are, and they sit right after the first-level data.
// the channel control structure
struct ChannelControl {
  // points to the beginning of the channel buffer, where data goes
  size_t channel_base;
  // maintains the state from the ChannelState enumeration
  volatile LONG state;
  // the ping event is signaled by the client when the IPC data is ready on
  // the buffer
  HANDLE ping_event;
  // the client waits on the pong event for the IPC answer back
  HANDLE pong_event;
  // the IPC unique identifier
  uint32_t ipc_tag;
};
// the actual shared memory IPC implementation class. This object is designed
// to be lightweight so it can be constructed on-site (at the calling place)
// wherever an IPC call is needed.
// Designed as a lightweight object that can simply be constructed at the call
// site whenever an IPC call is initiated.
class SharedMemIPCClient {
 public:
  // Creates the IPC client.
  // as parameter it takes the base address of the shared memory
  // The explicit keyword on this single-argument constructor prevents the
  // compiler from performing clever implicit conversions. Note also that
  // shared_mem is not owned by the client either.
  explicit SharedMemIPCClient(void* shared_mem);
  // locks a free channel and returns the channel buffer memory base. This call
  // blocks until there is a free channel
  // Grab a slot: find a channel in the free state; if there is none, block.
  void* GetBuffer();
  // releases the lock on the channel, for others to use. call this if you have
  // called GetBuffer and you want to abort but have not yet called DoCall()
  // Releases the lock on the channel. Use this when you obtained a channel via
  // GetBuffer but want to bail out before calling DoCall().
  void FreeBuffer(void* buffer);
  // Performs the actual IPC call.
  // params: The blob of packed input parameters.
  // answer: upon IPC completion, it contains the server answer to the IPC.
  // If the return value is not SBOX_ERROR_CHANNEL_ERROR, the caller has to free
  // the channel.
  // returns ALL_OK if the IPC mechanism successfully delivered. You still need
  // to check on the answer structure to see the actual IPC result.
  // DoCall is the key IPC call. We already met this interface in crosscall; at
  // the time it seemed odd that CrossCallParams already embeds a
  // CrossCallReturn and yet an extra answer parameter is passed in.
  ResultCode DoCall(CrossCallParams* params, CrossCallReturn* answer);
 private:
  // Returns the index of the first free channel. It sets 'severe_failure'
  // to true if there is an unrecoverable error that does not allow to
  // find a channel.
  // Clearly an internal helper used by GetBuffer.
  size_t LockFreeChannel(bool* severe_failure);

  // Return the channel index given the address of the buffer.
  // Derives the channel index back from the buffer's address.
  size_t ChannelIndexFromBuffer(const void* buffer);

  IPCControl* control_;  // the client uses the same structure, pointing at the start of the shared memory
  // point to the first channel base
  char* first_base_;  // start address of the third-level structures, the channel buffers
};
The constructor:
// The constructor simply casts the shared memory to the internal
// structures. This is a cheap step that is why this IPC object can
// and should be constructed per call.
// Again, the shared memory is not maintained by the client; it is handed in
// from outside.
SharedMemIPCClient::SharedMemIPCClient(void* shared_mem)
    : control_(reinterpret_cast<IPCControl*>(shared_mem)) {
  first_base_ =
      reinterpret_cast<char*>(shared_mem) + control_->channels[0].channel_base;
  // There must be at least one channel.
  DCHECK(0 != control_->channels_count);
}
// Locking a channel is as simple as looping over all the channels
// looking for one that has state = kFreeChannel and atomically
// swapping it to kBusyChannel.
// If there is no free channel, then we must back off so some other
// thread makes progress and frees a channel. To back off we sleep.
size_t SharedMemIPCClient::LockFreeChannel(bool* severe_failure) {
  if (0 == control_->channels_count) {
    *severe_failure = true;
    return 0;
  }
  // Iterate over the channels looking for the first one in the kFreeChannel
  // state; if found, flip it to the busy state.
  ChannelControl* channel = control_->channels;
  do {
    for (size_t ix = 0; ix != control_->channels_count; ++ix) {
      if (kFreeChannel == ::InterlockedCompareExchange(
                              &channel[ix].state, kBusyChannel, kFreeChannel)) {
        *severe_failure = false;
        return ix;
      }
    }
    // We did not find any available channel, maybe the server is dead.
    // If no channel is free right now, first check whether the server is still
    // alive. If it has died, fail right away since there is no point doing IPC
    // anymore; otherwise keep polling.
    DWORD wait =
        ::WaitForSingleObject(control_->server_alive, kIPCWaitTimeOut2);
    if (WAIT_TIMEOUT != wait) {
      // The server is dead and we outlive it enough to get in trouble.
      *severe_failure = true;
      return 0;
    }
  } while (true);
}
// Find out which channel we are from the pointer returned by GetBuffer.
// Simple pointer arithmetic, nothing fancy.
size_t SharedMemIPCClient::ChannelIndexFromBuffer(const void* buffer) {
  ptrdiff_t d = reinterpret_cast<const char*>(buffer) - first_base_;
  size_t num = d / kIPCChannelSize;
  DCHECK_LT(num, control_->channels_count);
  return (num);
}
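For example, with the default 1024-byte channel buffers (which is what kIPCChannelSize amounts to here), a buffer that starts 2048 bytes past first_base_ gives d = 2048 and therefore channel index 2048 / 1024 = 2.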
// Get the base of the data buffer of the channel; this is where the input
// parameters get serialized. Since they get serialized directly into the
// channel we avoid one copy.
// Obtains a channel in the free state.
void* SharedMemIPCClient::GetBuffer() {
  bool failure = false;
  size_t ix = LockFreeChannel(&failure);
  // This means the server is already dead.
  if (failure)
    return nullptr;
  // Plain pointer arithmetic.
  return reinterpret_cast<char*>(control_) +
         control_->channels[ix].channel_base;
}
// If we need to cancel an IPC before issuing DoCall
// our client should call FreeBuffer with the same pointer
// returned by GetBuffer.
// "Free" here does not release the channel buffer; it flips the channel back
// to the free state, marking it available again.
void SharedMemIPCClient::FreeBuffer(void* buffer) {
  size_t num = ChannelIndexFromBuffer(buffer);
  ChannelControl* channel = control_->channels;
  LONG result = ::InterlockedExchange(&channel[num].state, kFreeChannel);
  DCHECK_NE(kFreeChannel, static_cast<ChannelState>(result));
}
// Do the IPC. At this point the channel should have already been
// filled with the serialized input parameters.
// We follow the pattern explained in the header file.
// Both params and answer come from the caller; after all, we are only the
// transport, and the caller decides what message it carries.
ResultCode SharedMemIPCClient::DoCall(CrossCallParams* params,
                                      CrossCallReturn* answer) {
  // The server must still be alive.
  if (!control_->server_alive)
    return SBOX_ERROR_CHANNEL_ERROR;
  // The channel buffer is the params object itself (params->GetBuffer returns
  // this); the channel buffer carries the input layout we saw in
  // crosscall_client. By the time this function is called, the caller has
  // already placed params into one of the shared-memory channels; computing
  // num here lets us manipulate channel[num] directly.
  size_t num = ChannelIndexFromBuffer(params->GetBuffer());
  ChannelControl* channel = control_->channels;
  // Note that the IPC tag goes outside the buffer as well as inside
  // the buffer. This should enable the server to prioritize based on
  // IPC tags without having to de-serialize the entire message.
  channel[num].ipc_tag = params->GetTag();
  // Wait for the server to service this IPC call. After kIPCWaitTimeOut1
  // we check if the server_alive mutex was abandoned which will indicate
  // that the server has died.
  // While the atomic signaling and waiting is not a requirement, it
  // is nice because we save a trip to kernel.
  // Once the ChannelControl fields are filled in, we can directly signal ping
  // to notify the server, then wait on pong for up to kIPCWaitTimeOut1.
  DWORD wait = ::SignalObjectAndWait(channel[num].ping_event,
                                     channel[num].pong_event, kIPCWaitTimeOut1,
                                     false);
  if (WAIT_TIMEOUT == wait) {
    // The server is taking too long. Enter a loop where we check if the
    // server_alive mutex has been abandoned which would signal a server crash
    // or else we keep waiting for a response.
    // On timeout, probe whether the server is still alive and merely too busy
    // to service the IPC for the moment.
    while (true) {
      wait = ::WaitForSingleObject(control_->server_alive, 0);
      if (WAIT_TIMEOUT == wait) {
        // Server seems still alive. We already signaled so here we just wait.
        wait = ::WaitForSingleObject(channel[num].pong_event, kIPCWaitTimeOut1);
        if (WAIT_OBJECT_0 == wait) {
          // The server took a long time but responded.
          break;
        } else if (WAIT_TIMEOUT == wait) {
          continue;
        } else {
          return SBOX_ERROR_CHANNEL_ERROR;
        }
      } else {
        // The server has crashed and windows has signaled the mutex as
        // abandoned.
        // The server crashed; nothing more can be done here.
        ::InterlockedExchange(&channel[num].state, kAbandonedChannel);
        control_->server_alive = 0;
        return SBOX_ERROR_CHANNEL_ERROR;
      }
    }
  } else if (WAIT_OBJECT_0 != wait) {
    // Probably the server crashed before the kIPCWaitTimeOut1 occurred.
    return SBOX_ERROR_CHANNEL_ERROR;
  }
  // In the normal path, WAIT_OBJECT_0 is the return value we want. The channel
  // buffer is in fact the params object itself; by now the server has rewritten
  // params, filling in its embedded CrossCallReturn and, if there are any INOUT
  // parameters, modifying the input parameters of params in place as well. The
  // client peels the CrossCallReturn out of params and copies it into the
  // answer argument. This also explains why a separate CrossCallReturn is
  // passed alongside CrossCallParams: the two refer to the same data, but
  // answer is still needed after DoCall returns.
  // The server has returned an answer, copy it and free the channel.
  memcpy(answer, params->GetCallReturn(), sizeof(CrossCallReturn));
  // Return the IPC state. It can indicate that, while the IPC has
  // completed, some error in the Broker caused it to not return valid
  // results.
  return answer->call_outcome;  // the success/failure status code of this IPC call
}
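Seen from the caller, the client side of an IPC is only a handful of calls. A rough usage sketch, assuming the sandbox headers are available; BuildParamsInBuffer is a hypothetical stand-in for the serialization that crosscall_client performs directly into the channel buffer, and the non-channel error code is used loosely:

// Hypothetical call site sketch; not part of the sandbox sources.
ResultCode CallIntoBroker(void* shared_mem, uint32_t tag) {
  SharedMemIPCClient ipc(shared_mem);   // cheap: constructed per call
  void* buffer = ipc.GetBuffer();       // locks a free channel (may block)
  if (!buffer)
    return SBOX_ERROR_CHANNEL_ERROR;    // the server is gone

  // In crosscall_client the params object is built in place inside |buffer|,
  // so params->GetBuffer() is the channel buffer itself.
  CrossCallParams* params = BuildParamsInBuffer(buffer, tag);  // hypothetical helper
  if (!params) {
    ipc.FreeBuffer(buffer);             // abort before DoCall: put the channel back to free
    return SBOX_ERROR_GENERIC;
  }

  CrossCallReturn answer = {0};
  ResultCode code = ipc.DoCall(params, &answer);
  if (code != SBOX_ERROR_CHANNEL_ERROR)
    ipc.FreeBuffer(buffer);             // per DoCall's contract, the caller frees the channel
  return code;                          // still check answer.call_outcome for the IPC's own result
}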
// Construct the IPC server and the IPC dispatcher. When the target does
// an IPC it will eventually call the dispatcher.
// The TargetProcess object hangs off the PolicyBase object created by the
// broker and is managed there. Be clear about this: TargetProcess belongs to
// the broker, not to the target; it is the broker-side structure that
// represents the target process.
// The upper layer calls it like this:
//   ret = target->Init(dispatcher_.get(), policy_, kIPCMemSize, kPolMemSize, &win_error);
// dispatcher_ is the object whose OnMessageReady matches the parameters;
// policy_ is the low-level-policy buffer, which we ignore for now;
// kIPCMemSize is the size of the buffer used by SharedMemIPC, set to 2 pages;
// kPolMemSize is the low-level-policy buffer size, set to 14 pages (also
// ignored for now).
ResultCode TargetProcess::Init(Dispatcher* ipc_dispatcher,
                               void* policy,
                               uint32_t shared_IPC_size,
                               uint32_t shared_policy_size,
                               DWORD* win_error) {
  // We need to map the shared memory on the target. This is necessary for
  // any IPC that needs to take place, even if the target has not yet hit
  // the main( ) function or even has initialized the CRT. So here we set
  // the handle to the shared section. The target on the first IPC must do
  // the rest, which boils down to calling MapViewofFile()
  // We use this single memory pool for IPC and for policy.
  DWORD shared_mem_size =
      static_cast<DWORD>(shared_IPC_size + shared_policy_size);
  // Here it is at last: the shared memory for SharedMemIPC and for
  // low-level-policy is created in one single mapping.
  shared_section_.Set(::CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr,
                                           PAGE_READWRITE | SEC_COMMIT, 0,
                                           shared_mem_size, nullptr));
  if (!shared_section_.IsValid()) {
    *win_error = ::GetLastError();
    return SBOX_ERROR_CREATE_FILE_MAPPING;
  }
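Since one file mapping backs both consumers, splitting it after MapViewOfFile is plain pointer arithmetic: the first shared_IPC_size bytes feed the IPC channels and the rest is the policy buffer. A small sketch under that assumption; the constants follow the 2-page / 14-page figures quoted above, and the function name is mine:

#include <windows.h>
#include <cstdint>

const uint32_t kIPCMemSize = 2 * 4096;   // per the note above: 2 pages for SharedMemIPC
const uint32_t kPolMemSize = 14 * 4096;  // and 14 pages for the low-level policy

bool MapAndSplit(HANDLE shared_section, void** ipc_region, void** policy_region) {
  void* mem = ::MapViewOfFile(shared_section, FILE_MAP_READ | FILE_MAP_WRITE, 0,
                              0, kIPCMemSize + kPolMemSize);
  if (!mem)
    return false;
  *ipc_region = mem;                                            // handed to SharedMemIPCServer::Init()
  *policy_region = reinterpret_cast<char*>(mem) + kIPCMemSize;  // the policy buffer lives right after
  return true;
}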
// Top level dispatcher which hands requests to the appropriate service
// dispatchers.
// As the name suggests, this is a top-level dispatcher: it maintains the
// per-subsystem dispatchers and routes IPC requests to them by category.
class TopLevelDispatcher : public Dispatcher {
 public:
  // |policy| must outlive this class.
  explicit TopLevelDispatcher(PolicyBase* policy);
  ~TopLevelDispatcher() override;
  Dispatcher* OnMessageReady(IPCParams* ipc, CallbackGeneric* callback) override;

  // Remember this function from the Dispatcher class? Its original comment read:
  // "Called when a target process is created, to setup the interceptions
  // related with the given service (IPC)."
  // An Interception looks like a hook applied to the IPC request, but how it
  // actually takes effect will only become clear once the Interception
  // mechanism itself is analyzed.
  bool SetupService(InterceptionManager* manager, int service) override;
 private:
  // Test IPC provider.
  bool Ping(IPCInfo* ipc, void* cookie);
  // Returns a dispatcher from ipc_targets_.
  // Presumably OnMessageReady uses the ipc_tag carried by IPCParams to locate
  // the concrete lower-level Dispatcher.
  Dispatcher* GetDispatcher(int ipc_tag);
enum {
  IPC_UNUSED_TAG = 0,
  IPC_PING1_TAG,  // Takes a cookie in parameters and returns the cookie
                  // multiplied by 2 and the tick_count. Used for testing only.
  IPC_PING2_TAG,  // Takes an in/out cookie in parameters and modify the cookie
                  // to be multiplied by 3. Used for testing only.
  IPC_NTCREATEFILE_TAG,
  IPC_NTOPENFILE_TAG,
  IPC_NTQUERYATTRIBUTESFILE_TAG,
  IPC_NTQUERYFULLATTRIBUTESFILE_TAG,
  IPC_NTSETINFO_RENAME_TAG,
  IPC_CREATENAMEDPIPEW_TAG,
  IPC_NTOPENTHREAD_TAG,
  IPC_NTOPENPROCESS_TAG,
  IPC_NTOPENPROCESSTOKEN_TAG,
  IPC_NTOPENPROCESSTOKENEX_TAG,
  IPC_CREATEPROCESSW_TAG,
  IPC_CREATEEVENT_TAG,
  IPC_OPENEVENT_TAG,
  IPC_NTCREATEKEY_TAG,
  IPC_NTOPENKEY_TAG,
  IPC_GDI_GDIDLLINITIALIZE_TAG,
  IPC_GDI_GETSTOCKOBJECT_TAG,
  IPC_USER_REGISTERCLASSW_TAG,
  IPC_CREATETHREAD_TAG,
  IPC_USER_ENUMDISPLAYMONITORS_TAG,
  IPC_USER_ENUMDISPLAYDEVICES_TAG,
  IPC_USER_GETMONITORINFO_TAG,
  IPC_GDI_CREATEOPMPROTECTEDOUTPUTS_TAG,
  IPC_GDI_GETCERTIFICATE_TAG,
  IPC_GDI_GETCERTIFICATESIZE_TAG,
  IPC_GDI_DESTROYOPMPROTECTEDOUTPUT_TAG,
  IPC_GDI_CONFIGUREOPMPROTECTEDOUTPUT_TAG,
  IPC_GDI_GETOPMINFORMATION_TAG,
  IPC_GDI_GETOPMRANDOMNUMBER_TAG,
  IPC_GDI_GETSUGGESTEDOPMPROTECTEDOUTPUTARRAYSIZE_TAG,
  IPC_GDI_SETOPMSIGNINGKEYANDSEQUENCENUMBERS_TAG,
  IPC_LAST_TAG
};
// When an IPC is ready in any of the targets we get called. We manage an array
// of IPC dispatchers which are keyed on the IPC tag so we normally delegate
// to the appropriate dispatcher unless we can handle the IPC call ourselves.
Dispatcher* TopLevelDispatcher::OnMessageReady(IPCParams* ipc,
                                               CallbackGeneric* callback) {
  DCHECK(callback);
  static const IPCParams ping1 = {IPC_PING1_TAG, {UINT32_TYPE}};
  static const IPCParams ping2 = {IPC_PING2_TAG, {INOUTPTR_TYPE}};
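Given that the dispatchers are kept in an array keyed by IPC tag, the GetDispatcher() lookup mentioned earlier amounts to little more than a bounds-checked index; roughly:

// Rough sketch of the tag-keyed delegation described above: one Dispatcher*
// slot per tag, nullptr for tags nobody registered a handler for.
Dispatcher* TopLevelDispatcher::GetDispatcher(int ipc_tag) {
  if (ipc_tag <= IPC_UNUSED_TAG || ipc_tag >= IPC_LAST_TAG)
    return nullptr;
  return ipc_targets_[ipc_tag];
}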
// Creates a server thread that answers the IPC so slow that is guaranteed to
// trigger the time-out code path in the client. A second thread is created
// to hold locked the server_alive mutex: this signals the client that the
// server is not dead and it retries the wait.
TEST(IPCTest, ClientSlowServer) {
  size_t base_start = 0;
  IPCControl* client_control =
      MakeChannels(kIPCChannelSize, 4096 * 2, &base_start);  // nothing is actually shared here; it just news up a block of memory
  FixChannels(client_control, base_start, kIPCChannelSize, FIX_PONG_NOT_READY);
  client_control->server_alive = ::CreateMutex(nullptr, false, nullptr);
  char* mem = reinterpret_cast<char*>(client_control);
  SharedMemIPCClient client(mem);  // construct a local SharedMemIPCClient object
// Helper function to make the fake shared memory with some
// basic elements initialized.
IPCControl* MakeChannels(size_t channel_size,
                         size_t total_shared_size,
                         size_t* base_start) {
  // Allocate memory
  char* mem = new char[total_shared_size];  // new a block of memory and lay an IPCControl over it
  memset(mem, 0, total_shared_size);
  // Calculate how many channels we can fit in the shared memory.
  total_shared_size -= offsetof(IPCControl, channels);
  size_t channel_count =
      total_shared_size / (sizeof(ChannelControl) + channel_size);
  // Calculate the start of the first channel.
  *base_start =
      (sizeof(ChannelControl) * channel_count) + offsetof(IPCControl, channels);
  // Setup client structure.
  IPCControl* client_control = reinterpret_cast<IPCControl*>(mem);
  client_control->channels_count = channel_count;
  return client_control;
}